Once a device is moved into a namespace with ip link, it can no longer be seen on the host.
Here is how to move it back to the host.



## Put the host server's eno2 into the qrouter namespace and set its IP
$ sudo ip netns
qrouter-68cfc511-7e75-4b85-a1ca-d8a09c489ccc
qdhcp-03a6de58-9693-4c41-9577-9307c8750141

## Move eno2 into the namespace
$ sudo ip link set eno2 netns qrouter-68cfc511-7e75-4b85-a1ca-d8a09c489ccc
$ sudo ip netns exec qrouter-68cfc511-7e75-4b85-a1ca-d8a09c489ccc ip a
4: eno2: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
    link/ether 3c:a8:2a:20:ed:d1 brd ff:ff:ff:ff:ff:ff

$ sudo ip netns exec qrouter-68cfc511-7e75-4b85-a1ca-d8a09c489ccc ip addr add 192.168.130.100/24 dev eno2
$ sudo ip netns exec qrouter-68cfc511-7e75-4b85-a1ca-d8a09c489ccc ifconfig eno2 up



## Move eno2 from the qrouter namespace back to the host

$ sudo ip netns exec qrouter-68cfc511-7e75-4b85-a1ca-d8a09c489ccc ip link set eno2 netns 1
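
After the move, the interface reappears on the host in the DOWN state, so bring it up and re-add the address on the host if it is still needed (a quick check; the address below is just the same one used above):

$ ip link show eno2
$ sudo ip addr add 192.168.130.100/24 dev eno2
$ sudo ip link set eno2 up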










## https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/cephfs

[ On the ceph-admin node ]
$ ssh ceph@192.168.30.22

## Create the kubes pool
$ ceph osd pool create kubes 128

## Create the kube user
$ ceph auth get-or-create client.kube mon 'allow r' \
osd 'allow class-read object_prefix rbd_children, allow rwx pool=kubes'


[client.kube]
    key = AQCt/BpYigJ7MRAA5vy+cl39EsKpY3C+tXEGrA==

## Create and look up the secret key for the kube user
$ ceph auth get-or-create client.kube
AQCt/BpYigJ7MRAA5vy+cl39EsKpY3C+tXEGrA==


## Add the kube keyring and ceph.conf to the kube-node01 and kube-node02 servers
$ ssh stack@192.168.30.15 sudo mkdir -p /etc/ceph
$ ceph auth get-or-create client.kube | ssh stack@192.168.30.15 sudo tee /etc/ceph/ceph.client.kube.keyring
$ cat /etc/ceph/ceph.conf | ssh stack@192.168.30.15 sudo tee /etc/ceph/ceph.conf
$ ssh stack@192.168.30.15 sudo chown -R stack.stack /etc/ceph

$ ssh stack@192.168.30.16 sudo mkdir -p /etc/ceph
$ ceph auth get-or-create client.kube | ssh stack@192.168.30.16 sudo tee /etc/ceph/ceph.client.kube.keyring
$ cat /etc/ceph/ceph.conf | ssh stack@192.168.30.16 sudo tee /etc/ceph/ceph.conf
$ ssh stack@192.168.30.16 sudo chown -R stack.stack /etc/ceph



[ On kube-node01 and kube-node02 ]
## Install the Ceph RBD client (ceph-common) and the CephFS client (ceph-fs-common)
$ sudo apt-get -y install ceph-common ceph-fs-common



########################################
## Connecting with Ceph RBD
########################################

## https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/rbd
## https://github.com/ceph/ceph-docker/tree/master/examples/kubernetes

[ On the ceph-admin node ]

## Create the kube keyring file
$ sudo vi /etc/ceph/ceph.client.kube.keyring
[client.kube]
    key = AQCt/BpYigJ7MRAA5vy+cl39EsKpY3C+tXEGrA==


## Create an RBD image
## http://karan-mj.blogspot.kr/2013/12/ceph-installation-part-3.html

$ rbd create ceph-rbd-test --pool kubes --name client.kube --size 1G -k /etc/ceph/ceph.client.kube.keyring

$ rbd list --pool kubes --name client.kube -k /etc/ceph/ceph.client.kube.keyring
$ rbd -p kubes ls


## The new image features in Jewel currently cause mount problems on most OSes, so they must be disabled
$ rbd feature disable ceph-rbd-test fast-diff --pool kubes --name client.kube -k /etc/ceph/ceph.client.kube.keyring
$ rbd feature disable ceph-rbd-test deep-flatten --pool kubes --name client.kube -k /etc/ceph/ceph.client.kube.keyring
$ rbd feature disable ceph-rbd-test object-map --pool kubes --name client.kube -k /etc/ceph/ceph.client.kube.keyring
$ rbd feature disable ceph-rbd-test exclusive-lock --pool kubes --name client.kube -k /etc/ceph/ceph.client.kube.keyring

$ rbd info ceph-rbd-test --pool kubes --name client.kube -k /etc/ceph/ceph.client.kube.keyring
$ rbd --image ceph-rbd-test -p kubes info

$ rbd remove ceph-rbd-test --pool kubes --name client.kube -k /etc/ceph/ceph.client.kube.keyring


## Base64-encode the key to build the Secret YAML
$ grep key /etc/ceph/ceph.client.kube.keyring |awk '{printf "%s", $NF}'|base64
QVFDdC9CcFlpZ0o3TVJBQTV2eStjbDM5RXNLcFkzQyt0WEVHckE9PQ==




[ On kube-deploy ]

## Create the secret key as a Kubernetes Secret for pods to use
$ vi ~/kube/ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  key: QVFDdC9CcFlpZ0o3TVJBQTV2eStjbDM5RXNLcFkzQyt0WEVHckE9PQ==

$ scp ~/kube/ceph-secret.yaml kube-master01:~/kube/.
$ ssh kube-master01 "kubectl create -f ~/kube/ceph-secret.yaml"
$ kubectl -s http://kube-master01:8080 get secrets
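
For reference, the same Secret could also be created in one step instead of hand-writing the YAML — a sketch, assuming kubectl can reach the API server and the Ceph admin keyring is readable on the same host (kubectl base64-encodes the literal value itself):

$ kubectl -s http://kube-master01:8080 create secret generic ceph-secret \
  --from-literal=key=$(ceph auth get-key client.kube)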


## Create the rbd-with-secret pod to use RBD
$ vi ~/kube/rbd-with-secret.yml
apiVersion: v1
kind: Pod
metadata:
  name: rbd2
spec:
  containers:
  - image: gcr.io/google_containers/busybox
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
    name: rbd-rw-busybox
    volumeMounts:
    - mountPath: "/mnt/rbd"
      name: rbdpd
  volumes:
  - name: rbdpd
    rbd:
      monitors:
      - 192.168.30.23:6789
      - 192.168.30.24:6789
      - 192.168.30.25:6789
      pool: kubes
      image: ceph-rbd-test
      user: kube
      keyring: /etc/ceph/ceph.client.kube.keyring
      secretRef:
        name: ceph-secret
      fsType: ext4
      readOnly: false


$ scp ~/kube/rbd-with-secret.yml kube-master01:~/kube/.
$ ssh kube-master01 "kubectl create -f ~/kube/rbd-with-secret.yml"
$ kubectl -s http://kube-master01:8080 get pods




## Verify the RBD attachment
$ kubectl -s http://kube-master01:8080 describe pods rbd2
$ kubectl -s http://kube-master01:8080 exec -it rbd2 -- df -h



[ On kube-node02 ]

$ docker ps
$ docker inspect --format '{{ .Mounts }}' 4c4070a1393b

## or
$ mount |grep kub
/dev/rbd0 on /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/kubes-image-ceph-rbd-test type ext4 (rw,relatime,stripe=1024,data=ordered)
/dev/rbd0 on /var/lib/kubelet/pods/061973fc-a265-11e6-940f-5cb9018c67dc/volumes/kubernetes.io~rbd/rbdpd type ext4 (rw,relatime,stripe=1024,data=ordered)




[ On kube-deploy ]

## Create the RBD pod using only the keyring, without the Secret
$ vi ~/kube/rbd.yml
apiVersion: v1
kind: Pod
metadata:
  name: rbd
spec:
  containers:
  - image: gcr.io/google_containers/busybox
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
    name: rbd-rw-busybox
    volumeMounts:
    - mountPath: "/mnt/rbd"
      name: rbdpd
  volumes:
  - name: rbdpd
    rbd:
      monitors:
      - 192.168.30.23:6789
      - 192.168.30.24:6789
      - 192.168.30.25:6789
      pool: kubes
      image: ceph-rbd-test
      user: kube
      keyring: /etc/ceph/ceph.client.kube.keyring
      fsType: ext4
      readOnly: false


$ scp ~/kube/rbd.yml kube-master01:~/kube/.
$ ssh kube-master01 "kubectl create -f ~/kube/rbd.yml"
$ kubectl -s http://kube-master01:8080 get pods

## Verify the RBD attachment

$ kubectl -s http://kube-master01:8080 exec -it rbd -- df -h 



Using Authentication and Authorization in Kubernetes


[ Authentication ]

  • Connect with a client-ca-file
  • Use a static password file
  • Use a static token file
  • Use OpenStack Keystone
client-ca-file is ruled out first because it cannot be combined with Authorization.

Static passwords also work, but I have not tried wiring them up to Authorization.

The OpenStack Keystone integration is still alpha and changes frequently, so it is skipped; it is not yet worth digging into its source.

The static token method also integrates well with Authorization, so that is what we use here.



## Generate a uuid

$ cat /proc/sys/kernel/random/uuid



## Replace {{uuid}} with the value generated above

$ sudo vi /etc/default/kube-token

{{uuid}},admin,1

{{uuid}},ahnsk,2,"tfabric,group1"

{{uuid}},stack,3,tfabric



## Add the token file option to the API server

$ sudo chown stack.root /etc/default/kube-token

--token-auth-file=/etc/default/kube-token \


$ sudo systemctl restart kube-apiserver.service


$ kubectl -s https://kube-master01:6443 --token={{uuid}} get node





[ Authorization ]

  • ABAC Mode
  • RBAC Mode
RBAC is still alpha, so we use ABAC, which is beta.

## Cluster-wide admin: admin,   tfabric admin: ahnsk,   tfabric read-only user: stack
Because kubectl checks the API version, nonResourcePath must always be set to * as well.

$ sudo vi /etc/default/kube-rbac.json
{"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"system:serviceaccount:kube-system:default","namespace":"*","resource":"*","apiGroup":"*", "nonResourcePath": "*"}}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*", "nonResourcePath": "*" }}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"ahnsk", "namespace": "tfabric", "resource": "*", "apiGroup": "*", "nonResourcePath": "*" }}
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"stack", "namespace": "tfabric", "resource": "*", "apiGroup": "*", "readonly": true, "nonResourcePath": "*"}}



The first line is required so that kube-system can reach kube-apiserver.





$ sudo vi /etc/default/kube-apiserver

--authorization-mode=ABAC \

--authorization-policy-file=/etc/default/kube-rbac.json \


$ sudo systemctl restart kube-apiserver.service




$ cd ~/kube

$ vi busybox-tfabric.yaml

apiVersion: v1

kind: Pod

metadata:

  name: busybox

  namespace: tfabric

spec:

  containers:

  - image: gcr.io/google_containers/busybox

    command:

      - sleep

      - "3600"

    imagePullPolicy: IfNotPresent

    name: busybox

  restartPolicy: Always



$ kubectl -s https://kube-master01:6443 --token={{uuid}} --v=8 version
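
To see the ABAC policy in effect, the tokens can be compared directly — a sketch, where {{uuid-ahnsk}} and {{uuid-stack}} are placeholders for the tokens assigned to ahnsk and stack in /etc/default/kube-token (not values from this post):

$ kubectl -s https://kube-master01:6443 --token={{uuid-stack}} get pods --namespace=tfabric
$ kubectl -s https://kube-master01:6443 --token={{uuid-stack}} create -f busybox-tfabric.yaml    # expected to be forbidden (read-only)
$ kubectl -s https://kube-master01:6443 --token={{uuid-ahnsk}} create -f busybox-tfabric.yaml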


Specifying the token on every command is tedious, so it is better to set up a config context.

That will be covered in a later post.
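
For reference, a minimal sketch of such a context (the cluster and context names below are arbitrary placeholders):

$ kubectl config set-cluster kube --server=https://kube-master01:6443 --insecure-skip-tls-verify=true
$ kubectl config set-credentials ahnsk --token={{uuid}}
$ kubectl config set-context tfabric-ahnsk --cluster=kube --user=ahnsk --namespace=tfabric
$ kubectl config use-context tfabric-ahnsk
$ kubectl get pods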
















## https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt


cpu.cfs_period_us is normally used together with cpu.cfs_quota_us, and the calculation works as follows (unit: microseconds).

Usage of one CPU (%) = cpu.cfs_quota_us / cpu.cfs_period_us * 100

To let a cgroup use 20% of one CPU, set the values like this:

# echo 10000 > cpu.cfs_quota_us /* quota = 10ms */
# echo 50000 > cpu.cfs_period_us /* period = 50ms */


In Kubernetes, cpu and memory limits are assigned as resource limits like this:
--limits='cpu=200m,memory=512Mi'

A cpu limit of 200m means 0.2 CPU, i.e. 0.2 seconds of CPU time per 1 second of wall time (the maximum value of cpu.cfs_period_us is 1 sec == 1,000,000 microseconds).
Since the cgroup default is cpu.cfs_quota_us = -1 (unlimited), the setting above effectively caps the container at 20% of one CPU.
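
For example, with the default CFS period of 100 ms, a 200m limit corresponds to a quota of 20 ms per period; the same 20% cap can be reproduced by hand in a throwaway cgroup (a sketch, assuming cgroup v1 mounted at /sys/fs/cgroup/cpu):

# mkdir /sys/fs/cgroup/cpu/demo
# echo 100000 > /sys/fs/cgroup/cpu/demo/cpu.cfs_period_us   # period = 100ms
# echo 20000 > /sys/fs/cgroup/cpu/demo/cpu.cfs_quota_us     # quota = 20ms, i.e. 20% of one CPU (== 200m)
# echo $$ > /sys/fs/cgroup/cpu/demo/tasks                   # put the current shell under the cap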




###################################
## Install kube-dns (skydns) and the dashboard
###################################
## Searching this blog for [kubernetes] will bring up the related posts


$ cd ~/kube
$ export KUBE_ROOT=/home/ubuntu/go_workspace/src/k8s.io/kubernetes
$ export DNS_REPLICAS=1
$ export DNS_DOMAIN=cluster.local
$ export DNS_SERVER_IP=192.168.30.200

$ sed -e "s/\\\$DNS_REPLICAS/${DNS_REPLICAS}/g;\
s/\\\$DNS_DOMAIN/${DNS_DOMAIN}/g;" \
"${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.sed" > skydns-rc.yaml

## Add kube-master-url to skydns-rc.yaml
$ vi skydns-rc.yaml
81         - --kube-master-url=http://192.168.30.13:8080


$ sed -e "s/\\\$DNS_SERVER_IP/${DNS_SERVER_IP}/g" \
"${KUBE_ROOT}/cluster/addons/dns/skydns-svc.yaml.sed" > skydns-svc.yaml

$ cat <<EOF >namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
EOF


$ cp ~/go_workspace/src/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-controller.yaml ~/kube/.
$ cp ~/go_workspace/src/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-service.yaml ~/kube/.


## Copy to kube-master01
$ ssh kube-master01 "mkdir -p kube"
$ scp ~/kube/skydns-rc.yaml kube-master01:~/kube/.
$ scp ~/kube/skydns-svc.yaml kube-master01:~/kube/.
$ scp ~/kube/namespace.yaml kube-master01:~/kube/.

$ scp ~/kube/dashboard-controller.yaml kube-master01:~/kube/.
$ scp ~/kube/dashboard-service.yaml kube-master01:~/kube/.

$ ssh kube-master01 "kubectl create -f ~/kube/namespace.yaml"
$ ssh kube-master01 "kubectl --namespace=kube-system create -f ~/kube/skydns-rc.yaml"
$ ssh kube-master01 "kubectl --namespace=kube-system create -f ~/kube/skydns-svc.yaml"



## Install the dashboard
$ cd ~/kube

## Add the master API address so the dashboard can reach it
$ vi kubernetes-dashboard.yaml
47         - --apiserver-host=http://192.168.30.13:8080

$ scp ~/kube/kubernetes-dashboard.yaml kube-master01:~/kube/.
$ ssh kube-master01 "kubectl create -f ~/kube/kubernetes-dashboard.yaml" 



## Verify the skydns installation
$ vi ~/kube/busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: gcr.io/google_containers/busybox
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always


$ scp ~/kube/busybox.yaml kube-master01:~/kube/.
$ ssh kube-master01 "kubectl create -f ~/kube/busybox.yaml"


## The API server can be specified with the -s option
$ kubectl -s http://kube-master01:8080 get pods --all-namespaces -o wide
$ kubectl -s http://kube-master01:8080 describe pod kube-dns-v19-d1tse --namespace=kube-system

$ kubectl -s http://kube-master01:8080 get pods busybox -o wide
$ kubectl -s http://kube-master01:8080 exec busybox -- nslookup kubernetes.default




###################################
## How to use kubectl
###################################

## Create an nginx pod (ReplicaSets have replaced ReplicationControllers)
$ kubectl -s http://kube-master01:8080 run nginx --image=nginx (--replicas=2) --port=80
$ kubectl -s http://kube-master01:8080 get pods
$ kubectl -s http://kube-master01:8080 get pods -o wide
$ kubectl -s http://kube-master01:8080 get pod -l run=nginx


## nginx scaling
$ kubectl -s http://kube-master01:8080 scale deployment/nginx --replicas=1

## nginx auto scaling
$ kubectl -s http://kube-master01:8080 autoscale deployment/nginx --min=1 --max=3


## Look up the nginx ReplicaSet
$ kubectl -s http://kube-master01:8080 get rs



## Upgrade the image version without service interruption (just edit the spec with kubectl edit)
$ kubectl -s http://kube-master01:8080 edit deployment/nginx


## Create the nginx service (port: the port the service exposes, target-port: the port the container listens on inside Docker)
$ kubectl -s http://kube-master01:8080 expose deployment nginx --port=8080 --target-port=80 (--type=LoadBalancer) --name=nginx
$ kubectl -s http://kube-master01:8080 get services


## Delete the nginx deployment
$ kubectl -s http://kube-master01:8080 get deployment (nginx)
$ kubectl -s http://kube-master01:8080 delete deployment nginx

## Delete the nginx service
$ kubectl -s http://kube-master01:8080 delete service nginx

## Delete the nginx deployment and service at once
$ kubectl -s http://kube-master01:8080 delete deployment,service nginx
$ kubectl -s http://kube-master01:8080 delete deployments/nginx services/nginx

## Delete the nginx deployment and service at once using labels (-l or --selector)
$ kubectl -s http://kube-master01:8080 delete deployment,services -l app=nginx


## Delete an nginx replication controller (by default its pods are deleted too; --cascade=false deletes only the rc)
$ kubectl -s http://kube-master01:8080 delete rc nginx-rc
$ kubectl -s http://kube-master01:8080 delete rc nginx-rc --cascade=false


## Get a shell inside the nginx container
$ kubectl -s http://kube-master01:8080 exec -it nginx-3449338310-sl1ou -- /bin/bash

## View the container logs of a pod
$ kubectl -s http://kube-master01:8080 logs -f nginx-3449338310-sl1ou
$ kubectl -s http://kube-master01:8080 logs --tail=20 nginx-3449338310-sl1ou
$ kubectl -s http://kube-master01:8080 logs --since=1m nginx-3449338310-sl1ou




## Guestbook sample (redis-master, redis-slave, frontend)
$ cd ~/go_workspace/src/k8s.io/kubernetes
$ kubectl -s http://kube-master01:8080 create -f examples/guestbook/


## Change the frontend Service type from ClusterIP to NodePort
$ kubectl -s http://kube-master01:8080 edit services/frontend
28   type: NodePort


## View the output in JSON format
$ kubectl -s http://kube-master01:8080 get svc frontend -o json
$ kubectl -s http://kube-master01:8080 get svc frontend -o jsonpath='{.spec.ports[0].nodePort}{"\n"}'
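
With the NodePort known, the frontend can be reached on any node's IP (a quick check; 192.168.30.15 is kube-node01 in this setup):

$ NODE_PORT=$(kubectl -s http://kube-master01:8080 get svc frontend -o jsonpath='{.spec.ports[0].nodePort}')
$ curl http://192.168.30.15:${NODE_PORT}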


## Delete the Guestbook
$ kubectl -s http://kube-master01:8080 delete deployments,services -l "app in (redis, guestbook)"



## Create pods from the command line
$ kubectl -s http://kube-master01:8080 run frontend --image=gcr.io/google-samples/gb-frontend:v4 \
--env="GET_HOSTS_FROM=dns" --port=80 --replicas=3 \
--limits="cpu=100m,memory=100Mi" \
--labels="app=guestbook,tier=frontend"

## Create the frontend Service as type NodePort from the command line
$ kubectl -s http://kube-master01:8080 expose deployment frontend \
--port=80 --target-port=80 --name=frontend --type=NodePort \
--labels=app=guestbook,tier=frontend --selector=app=guestbook,tier=frontend





###################################
## Install GlusterFS and use it from Kubernetes
###################################
## Install GlusterFS

## Run on both gluster01 and gluster02
# vi /etc/hosts
192.168.30.15   kube-node01 gluster01
192.168.30.16   kube-node02 gluster02

# mkfs.xfs -f /dev/sdb
# mkdir -p /data/gluster/brick1 && chmod -R 777 /data
# echo '/dev/sdb /data/gluster/brick1 xfs defaults 1 2' >> /etc/fstab
# mount -a && mount

# apt-get install -y glusterfs-server glusterfs-client
# service glusterfs-server start


## On gluster01
# gluster peer probe gluster02

## On gluster02
# gluster peer probe gluster01


## Run on both gluster01 and gluster02
# mkdir -p /data/gluster/brick1/gv0 && chmod -R 777 /data
# mkdir -p /data/gluster/brick1/gv1 && chmod -R 777 /data/gluster/brick1/gv1


## Run on gluster01 (gv1 is the volume the pods will use when connecting to GlusterFS)
# gluster volume create gv0 replica 2 gluster01:/data/gluster/brick1/gv0 gluster02:/data/gluster/brick1/gv0
# gluster volume start gv0

# gluster volume create gv1 replica 2 gluster01:/data/gluster/brick1/gv1 gluster02:/data/gluster/brick1/gv1
# gluster volume start gv1


## Run on both gluster01 and gluster02
# mkdir -p /ext && chmod 777 -R /ext
# mount -t glusterfs gluster01:/gv0 /ext



## Test Kubernetes with GlusterFS
$ cd ~/go_workspace/src/k8s.io/kubernetes
$ cp examples/volumes/glusterfs/*.json ~/kube && cd ~/kube

$ vi glusterfs-endpoints.json
11           "ip": "192.168.30.15"
23           "ip": "192.168.30.16"

$ vi glusterfs-pod.json
11                 "image": "nginx"
25                     "path": "gv1",
26                     "readOnly": false


$ ssh kube-master01 "mkdir -p kube"
$ scp ~/kube/glusterfs-endpoints.json kube-master01:~/kube/.
$ scp ~/kube/glusterfs-service.json kube-master01:~/kube/.
$ scp ~/kube/glusterfs-pod.json kube-master01:~/kube/.

$ ssh kube-master01 "kubectl create -f ~/kube/glusterfs-endpoints.json"
$ ssh kube-master01 "kubectl create -f ~/kube/glusterfs-service.json"
$ ssh kube-master01 "kubectl create -f ~/kube/glusterfs-pod.json"


$ kubectl -s http://kube-master01:8080 get pods glusterfs -o jsonpath='{.status.hostIP}{"\n"}'

$ ssh kube-node01 "mount | grep gv1"
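
The mount can also be checked from inside the pod itself — a sketch, assuming the pod name glusterfs from the example json:

$ kubectl -s http://kube-master01:8080 exec glusterfs -- df -h | grep gv1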







10.0.0.171    kafka01 zookeeper01

10.0.0.172    kafka02 zookeeper02

10.0.0.173    kafka03 zookeeper03


192.168.30.171    kafka01 zookeeper01

192.168.30.172    kafka02 zookeeper02

192.168.30.173    kafka03 zookeeper03


## Create the Kafka VMs

$ openstack flavor create --id ka1 --ram 8192 --disk 160 --vcpus 2 kafka


$ openstack server create --image 7498cf9d-bd2e-4401-9ae9-ca72120272ed \

       --flavor ka1  --nic net-id=03a6de58-9693-4c41-9577-9307c8750141,v4-fixed-ip=10.0.0.171 \

       --key-name magnum-key --security-group default kafka01

$ openstack ip floating create --floating-ip-address 192.168.30.171 public

$ openstack ip floating add 192.168.30.171 kafka01





## Install Oracle Java 8

$ sudo add-apt-repository ppa:webupd8team/java

$ sudo apt-get update

$ sudo apt-get install oracle-java8-installer


## Manage multiple installed Java versions

$ sudo update-alternatives --config java





## Install ZooKeeper

## https://zookeeper.apache.org/doc/r3.4.9/zookeeperStarted.html

## http://apache.mirror.cdnetworks.com/zookeeper/zookeeper-3.4.9/


$ mkdir -p downloads && cd downloads

$ wget http://apache.mirror.cdnetworks.com/zookeeper/zookeeper-3.4.9/zookeeper-3.4.9.tar.gz


$ sudo tar -C /usr/local -xzvf zookeeper-3.4.9.tar.gz

$ cd /usr/local

$ sudo ln -s zookeeper-3.4.9/ zookeeper


$ vi /usr/local/zookeeper/conf/zoo.cfg

tickTime=2000

dataDir=/var/lib/zookeeper

clientPort=2181

initLimit=5

syncLimit=2

server.1=zookeeper01:2888:3888

server.2=zookeeper02:2888:3888

server.3=zookeeper03:2888:3888


$ vi /usr/local/zookeeper/bin/zkEnv.sh

56     ZOO_LOG_DIR="/var/log/zookeeper"


$ sudo mkdir -p /var/log/zookeeper && sudo chown -R stack.stack /var/log/zookeeper


## Set the ZooKeeper myid separately on each server

$ sudo mkdir -p /var/lib/zookeeper && sudo chown -R stack.stack /var/lib/zookeeper

$ vi /var/lib/zookeeper/myid

1


$ vi ~/.bashrc

export JAVA_HOME=/usr/lib/jvm/java-8-oracle

export ZOOKEEPER_HOME=/usr/local/zookeeper

PATH=$PATH:$ZOOKEEPER_HOME/bin


$ . ~/.bashrc

$ zkServer.sh start


## Verify the ZooKeeper installation

$ zkCli.sh -server zookeeper01:2181





## Install Kafka

## https://www.digitalocean.com/community/tutorials/how-to-install-apache-kafka-on-ubuntu-14-04


## https://kafka.apache.org/downloads.html

## https://kafka.apache.org/documentation.html


$ cd downloads

$ wget http://apache.mirror.cdnetworks.com/kafka/0.10.0.1/kafka_2.11-0.10.0.1.tgz

$ sudo tar -C /usr/local -xzvf kafka_2.11-0.10.0.1.tgz

$ cd /usr/local && sudo chown -R stack.stack kafka_2.11-0.10.0.1

$ sudo ln -s kafka_2.11-0.10.0.1/ kafka


## The broker id must be unique on each server

$ vi /usr/local/kafka/config/server.properties

20 broker.id=0

56 log.dirs=/var/lib/kafka

112 zookeeper.connect=zookeeper01:2181,zookeeper02:2181,zookeeper03:2181

117 delete.topic.enable = true


$ sudo mkdir -p /var/lib/kafka && sudo chown -R stack.stack /var/lib/kafka

$ sudo mkdir -p /var/log/kafka && sudo chown -R stack.stack /var/log/kafka


$ vi ~/.bashrc

export KAFKA_HOME=/usr/local/kafka

PATH=$PATH:$KAFKA_HOME/bin


$ . ~/.bashrc

$ nohup kafka-server-start.sh $KAFKA_HOME/config/server.properties > /var/log/kafka/kafka.log 2>&1 &


## kafkat: Kafka cluster management

$ sudo apt-get -y install ruby ruby-dev build-essential

$ sudo gem install kafkat --source https://rubygems.org --no-ri --no-rdoc

$ vi ~/.kafkatcfg

{

  "kafka_path": "/usr/local/kafka",

  "log_path": "/var/lib/kafka",

  "zk_path": "zookeeper01:2181,zookeeper02:2181,zookeeper03:2181"

}


## View Kafka partitions

$ kafkat partitions


## Test sending and receiving Kafka data

$ echo "Hello, World" | kafka-console-producer.sh --broker-list kafka01:9092,kafka02:9092,kafka03:9092 --topic TutorialTopic > /dev/null

$ kafka-console-consumer.sh --zookeeper zookeeper01:2181,zookeeper02:2181,zookeeper03:2181 --topic TutorialTopic --from-beginning


$ kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test

$ kafka-topics.sh --list --zookeeper localhost:2181


$ kafka-console-producer.sh --broker-list localhost:9092 --topic test

This is a message

This is another message


$ kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning


## Test with replication factor 3

$ kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 3 --partitions 1 --topic my-replicated-topic

$ kafka-topics.sh --describe --zookeeper localhost:2181 --topic my-replicated-topic

Topic:my-replicated-topic    PartitionCount:1    ReplicationFactor:3    Configs:

    Topic: my-replicated-topic    Partition: 0    Leader: 0    Replicas: 0,2,1    Isr: 0,2,1


$ kafka-console-producer.sh --broker-list localhost:9092 --topic my-replicated-topic

my test message 1

my test message 2

^C


$ kafka-console-consumer.sh --zookeeper localhost:2181 --from-beginning --topic my-replicated-topic


## Take one server down

$ kafka-server-stop.sh


$ kafka-topics.sh --describe --zookeeper localhost:2181 --topic my-replicated-topic

Topic:my-replicated-topic    PartitionCount:1    ReplicationFactor:3    Configs:

    Topic: my-replicated-topic    Partition: 0    Leader: 0    Replicas: 0,2,1    Isr: 0,1


$ kafka-console-consumer.sh --zookeeper localhost:2181 --from-beginning --topic my-replicated-topic


## Delete the topics

$ kafka-topics.sh --delete --zookeeper localhost:2181 --topic my-replicated-topic

$ kafka-topics.sh --delete --zookeeper localhost:2181 --topic TutorialTopic










## Build a prompt that shows the current project and user when working with the OpenStack CLI


## Prompt setup for OpenStack users; it is displayed as (project:user)

$ vi ~/.bashrc


openstack_user() {

  env | grep -E 'OS_USERNAME|OS_PROJECT_NAME' 2> /dev/null | sed -e 's/OS_PROJECT_NAME=\(.*\)/(\1/' -e 's/OS_USERNAME=\(.*\)/\1)/' | paste -sd ":"

}


PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]$(openstack_user)\$ '





$ . demo/demo-openrc

(demo:demo)$ openstack server list







## Handy commands when writing Ansible scripts

## Inspect variables

$ ansible -i hosts -m debug -a "var=hostvars['kube-master01']" localhost
$ ansible -i hosts -m debug -a "var=groups['kube-masters']" localhost

## Start the play at a specific task (restart flannel)
$ ansible-playbook -i hosts --start-at-task='restart flannel' 03-flannel.yml

## List the tasks
$ ansible-playbook -i hosts 03-flannel.yml --list-tasks
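
Two more standard ansible-playbook options that come in handy with these playbooks:

## dry run (no changes applied)
$ ansible-playbook -i hosts 03-flannel.yml --check

## run against a subset of hosts only
$ ansible-playbook -i hosts 04-kubernetes.yml --limit kube-nodes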


## All of the sources are on GitHub ...



[ Run on every node ]

$ sudo su -
# vi /etc/ssh/sshd_config
28 PermitRootLogin yes
52 PasswordAuthentication yes

# systemctl restart sshd
# passwd
# passwd stack


$ sudo apt-get install python2.7 python-minimal

$ sudo apt-get update
$ sudo apt-get install -y ansible

## The ntp and ntpdate packages must be installed and NTP configured




[ Initialize git and push to GitHub ]
$ cd ~/kubernetes-ansible
$ git init
$ git remote add origin https://github.com/seungkyua/kubernetes-ansible.git
$ git pull origin master
$ git config user.name "Seungkyu Ahn"
$ git config user.email "seungkyua@gmail.com"
$ git add -A
$ git commit -a -m "Initial commit"
$ git push --set-upstream origin master




[ Prerequisite ]
$ vi README.md
# Prerequisite #

 - This ansible-playbook is tested on Ubuntu 16.04 LTS
 - Need one Kubernetes deploy node (a.k.a kube-deploy)
 - Passwordless `root` SSH login from kube-deploy to all Kubernetes nodes, using hostnames
 - kube-deploy should have more than 2 GB of swap memory
 - All work should be executed as the `stack` user on kube-deploy
 - Ansible and the Python packages below should be installed on every node

```
$ sudo apt-get update
$ sudo apt-get install -y ansible python2.7 python-minimal
```

 - Group names and node names cannot be changed in the `hosts` file

```
[kube-deploy]
kube-deploy

[kube-masters]
kube-master01
kube-master02

[kube-nodes]
kube-node01
kube-node02

[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14

[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16
```

 - You have to change the `add_user_password` field in the `group_vars/all` file to your own password


## Tips ##

An encrypted password can be generated with the following command

```
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512
[ type the password and press Enter ]
```


## Execute order ##

```
$ sudo ansible-playbook -i hosts 00-create-user.yml
$ sudo ansible-playbook -i hosts 00-install-package.yml
$ sudo ansible-playbook -i hosts 01-install-docker.yml
$ sudo chown -R stack.stack ~/.ansible
$ ansible-playbook -i hosts 02-etcd.yml
$ ansible-playbook -i hosts 03-flannel.yml
$ ansible-playbook -i hosts 04-kubernetes.yml
```


## restart service ##

 - restart docker

```
$ sudo ansible-playbook -i hosts --tags="restart docker" 01-install-docker.yml
```

 - restart etcd

```
$ ansible-playbook -i hosts --tags="restart etcd" 02-etcd.yml
```

 - restart flannel

```
$ ansible-playbook -i hosts --tags="restart flannel" 03-flannel.yml
```

 - restart kubernetes

```
$ ansible-playbook -i hosts --tags="restart kube-apiserver,restart kube-controller-manager,restart kube-scheduler,restart kube-proxy,restart kubelet" 04-kubernetes.yml
```



[ Run on the kube-deploy node ]
## Connect to the kube-deploy node
$ ssh -i ~/magnum-key.pem stack@192.168.30.138
$ ssh-keygen -t rsa
$ sudo su -
# ssh-keygen -t rsa
# ssh-copy-id kube-deploy
# ssh-copy-id kube-master01
# ssh-copy-id kube-master02
# ssh-copy-id kube-node01
# ssh-copy-id kube-node02


$ mkdir -p ~/kubernetes-ansible && cd ~/kubernetes-ansible
$ vi hosts
[kube-deploy]
kube-deploy

[kube-masters]
kube-master01
kube-master02

[kube-nodes]
kube-node01
kube-node02

[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14

[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16


$ vi ansible.cfg
[defaults]
host_key_checking = False



## Generate the encrypted password value
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512



## Set the environment variables
$ mkdir -p group_vars && vi group_vars/all
ansible_dir: "kubernetes-ansible"
gopath_dir: "go_workspace"
add_user: "stack"
add_user_password: "generated password here!"
ubuntu_release: "xenial"
kube_deploy_uname_r: "4.4.0-22-generic"
uname_r: "4.4.0-21-generic"
etcd_data_dir: "/ext/data/etcd"
flannel_version: "v0.6.1"
flannel_net: "172.16.0.0/16"
mtu_size: "1500"
kube_version: "release-1.4"
kube_api_ip: "192.168.30.13"
service_cluster_ip_range: "192.168.30.192/26"
service_node_port_range: "30000-32767"
cluster_dns: "192.168.30.200"
cluster_domain: "cluster.local" 
 



$ mkdir -p files && vi files/hosts
127.0.0.1       localhost

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts


10.0.0.18           kube-depoly
192.168.30.138  kube-deploy
192.168.30.13    kube-master01
192.168.30.14    kube-master02
192.168.30.15    kube-node01
192.168.30.16    kube-node02




## Create the user (stack), copy the SSH key automatically, register the sudo user, set environment variables, and copy the hosts file
$ vi 00-create-user.yml
---
- name: create the user
  hosts: all
  remote_user: root

  tasks:
    - include_vars: group_vars/all

    - name: Add the {{ add_user }} user
      user: name={{ add_user }} groups=sudo createhome=yes shell=/bin/bash
            password={{ add_user_password }} append=yes
            generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa

    - name: Set up authorized_keys for the {{ add_user }}
      authorized_key: user={{ add_user }} key="{{ lookup('file', '/home/{{ add_user }}/.ssh/id_rsa.pub') }}"

    - name: sudo 
      lineinfile:
        "dest=/etc/sudoers state=present regexp='^{{ add_user }} ALL='
         line='{{ add_user }} ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'"

    - name: export GOPATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export GOPATH' line='export GOPATH=$HOME/{{ gopath_dir }}:$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes/Godeps/_workspace'"

    - name: export PATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export PATH'
         line='export PATH=$HOME/{{ gopath_dir }}/bin:$PATH'"

    - name: export KUBE_ROOT
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export KUBE_ROOT'
         line='export KUBE_ROOT=$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes'"

    - name: Copy hosts file
      copy:
        src: "files/hosts"
        dest: "/etc"
        owner: root


$ sudo ansible-playbook -i hosts 00-create-user.yml




## Install apt packages
$ vi 00-install-package.yml
---
- name: install package
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: root

  tasks:
    - include_vars: group_vars/all

    - name: Install apt packages
      apt: name={{ item }}  update_cache=yes
      with_items:
        - bridge-utils
        - linux-libc-dev
        - golang
        - gcc
        - curl
        - git


$ sudo ansible-playbook -i hosts 00-install-package.yml



## docker install
$ vi 01-install-docker.yml
---
# This playbook setup docker package

- hosts: kube-deploy kube-masters kube-nodes
  remote_user: root

  roles:
    - docker




$ mkdir -p roles/docker/files && vi roles/docker/files/docker.xenial.list
deb https://apt.dockerproject.org/repo ubuntu-xenial main


$ vi roles/docker/files/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/etc/default/docker
ExecStart=/usr/bin/docker daemon $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target



$ vi roles/docker/files/docker
DOCKER_OPTS=" -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock"



$ mkdir -p roles/docker/tasks && vi roles/docker/tasks/main.yml
---
- name: install apt-transport-https ca-certificates
  apt: name={{ item }}
  with_items:
    - apt-transport-https
    - ca-certificates

- name: add GPG key
  apt_key: keyserver=hkp://p80.pool.sks-keyservers.net:80 \
           id=58118E89F3A912897C070ADBF76221572C52609D

- name: add docker.list
  copy:
    src: "docker.{{ ubuntu_release }}.list"
    dest: "/etc/apt/sources.list.d"
    owner: root

- name: apt-get update
  apt: update_cache=yes

- name: install linux-image-extra kube-deploy
  apt: name=linux-image-extra-{{ kube_deploy_uname_r }}
  when: "'kube-deploy' in group_names"

- name: install linux-image-extra kube-masters kube-nodes
  apt: name=linux-image-extra-{{ uname_r }}
  when: "('kube-masters' in group_names) or ('kube-nodes' in group_names)"

#- name: restart servers
#  shell: sleep 2 && shutdown -r now
#  async: 0
#  poll: 0
#  ignore_errors: true

#- name: Waiting for server to come back
#  local_action: wait_for host={{ inventory_hostname }} \
#                state=started port=22 delay=10 timeout=300

#- name: Update apt
#  apt: update_cache=yes

- name: install docker
  apt: name=docker-engine

- name: add docker group
  user: name={{ add_user }} groups=docker append=yes

- name: copy docker config
  copy:
    src: "docker"
    dest: "/etc/default"
    mode: 0755
    owner: root

- name: copy docker.service
  copy:
    src: "docker.service"
    dest: "/lib/systemd/system"
    mode: 0644
    owner: root

- name: reload systemd
  shell: systemctl daemon-reload

- name: restart docker
  service: name=docker state=restarted enabled=yes
  tags:
    - restart docker

- name: export DOCKER_HOST
  lineinfile:
    "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export DOCKER_HOST'
     line='export DOCKER_HOST=127.0.0.1:4243'"


$ sudo ansible-playbook -i hosts 01-install-docker.yml



[ Install etcd ]

$ sudo chown -R stack.stack ~/.ansible
$ vi 02-etcd.yml
---
# This playbook installs etcd cluster.

- name: Setup etcd
  hosts: kube-masters
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - etcd


## --listen-peer-urls and --listen-client-urls can be set to the 0.0.0.0 address
$ mkdir -p roles/etcd/templates && vi roles/etcd/templates/etcd.conf.j2
DAEMON_ARGS="--name {{ inventory_hostname }} \
--data-dir={{ etcd_data_dir }} \
--initial-advertise-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379,http://127.0.0.1:2379,\
http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001,http://127.0.0.1:4001 \
--advertise-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379 \
--initial-cluster-token etcd-cluster-1 \
{% for host in groups['kube-masters'] %}
{% if host == groups['kube-masters']|first %}
--initial-cluster {{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% else %},{{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% endif %}{% endfor %} \

--initial-cluster-state new"


$ mkdir -p roles/etcd/tasks && vi roles/etcd/tasks/main.yml
---
- name: install etcd
  apt: name=etcd update_cache=yes

- name: copy etcd config
  template: src=etcd.conf.j2 dest=/etc/default/etcd

- name: enable etcd systemd
  service: name=etcd enabled=yes

- name: restart etcd
  service: name=etcd state=restarted
  tags:
    - restart etcd






$ ansible-playbook -i hosts 02-etcd.yml



## To start the play from a specific task
$ ansible-playbook -i hosts --start-at-task="restart etcd" 02-etcd.yml

## List the etcd members (two should show up)
$ etcdctl member list

## Test etcd
$ etcdctl --endpoint http://192.168.30.13:2379 set /test "hello"
$ etcdctl --endpoint http://192.168.30.14:2379 get /test
$ etcdctl --endpoint http://192.168.30.13:2379 rm /test

$ etcdctl --no-sync --endpoint http://kube-master01:2379 --debug ls / -recursive




[ Install flannel ]

## Register the flannel data in etcd and install flannel
$ vi 03-flannel.yml
---
- name: Setup flannel
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - flannel


## Script that registers the flannel network settings in etcd
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/config-flannel.sh.j2
#!/bin/bash

exec curl -s -L http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001/v2/keys/coreos.com/network/config \
-XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'



## A somewhat crude alternative
{% if inventory_hostname == 'kube-master01' %}
exec curl -s -L http://{{ groups['kube-masters'][0] }}:4001/v2/keys/coreos.com/network/config \
-XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
{% endif %}


## Script that downloads and builds flannel
$ vi roles/flannel/templates/download-flannel.sh.j2
#!/bin/bash

FLANNEL_DIR=${HOME}/github/flannel
FLANNEL_VERSION="{{ flannel_version }}"
ANSIBLE_HOME=${HOME}/{{ ansible_dir }}

function chdir() {
    cd $1
}

if [ ! -d ${FLANNEL_DIR} ]; then
    mkdir -p ${HOME}/github
    chdir ${HOME}/github
    git clone https://github.com/coreos/flannel.git
    chdir ${FLANNEL_DIR}
    git checkout -b ${FLANNEL_VERSION} tags/${FLANNEL_VERSION}
fi

chdir ${FLANNEL_DIR}

if [ ! -f build ]; then
cat <<EOF >build
#!/bin/bash -e

ORG_PATH="github.com/coreos"
REPO_PATH="\${ORG_PATH}/flannel"

if [ ! -h gopath/src/\${REPO_PATH} ]; then
        mkdir -p gopath/src/\${ORG_PATH}
        ln -s ../../../.. gopath/src/\${REPO_PATH} || exit 255
fi

export GOBIN=\${PWD}/bin
export GOPATH=\${PWD}/gopath

eval \$(go env)

if [ \${GOOS} = "linux" ]; then
        echo "Building flanneld..."
        go build -o \${GOBIN}/flanneld \${REPO_PATH}
else
        echo "Not on Linux - skipping flanneld build"
fi
EOF
fi

chmod 755 build
./build

mkdir -p ${ANSIBLE_HOME}/roles/flannel/files
cp bin/flanneld ${ANSIBLE_HOME}/roles/flannel/files



## The flannel.service file
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/flanneld.service.j2
[Unit]
Description=flanneld Service
#After=etcd.service
#Requires=etcd.service

[Service]
EnvironmentFile=/etc/default/flanneld
PermissionsStartOnly=true
User=root
ExecStart=/usr/bin/flanneld -etcd-endpoints=${FLANNEL_ETCD} $FLANNEL_OPTIONS
Restart=always
RestartSec=10s
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
Alias=flanneld.service


## The flannel config file
$ vi roles/flannel/templates/flanneld.j2
FLANNEL_ETCD="http://{{ groups['kube-masters'][0] }}:4001"
FLANNEL_OPTIONS="-v 0"


## Docker configuration script tied to flannel
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/docker-config.sh.j2
#! /bin/bash

ip link set dev docker0 down
brctl delbr docker0

source /run/flannel/subnet.env

echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 \
-H unix:///var/run/docker.sock \
--bip=${FLANNEL_SUBNET} \
--mtu=${FLANNEL_MTU}\" > /etc/default/docker



## Ansible tasks that install flannel
$ mkdir -p roles/flannel/tasks && vi roles/flannel/tasks/main.yml
---
# set flannel data into etcd
- name: copy config-flannel
  template: src=config-flannel.sh.j2 dest=~/config-flannel.sh mode=755
  when: inventory_hostname == 'kube-master01'

- name: run config-flannel
  command: ~/config-flannel.sh
  when: inventory_hostname == 'kube-master01'

- name: remove config-flannel
  file: name=~/config-flannel.sh state=absent
  when: inventory_hostname == 'kube-master01'

# flannel download, build, install
- name: copy download-flannel
  template: src=download-flannel.sh.j2 dest=~/download-flannel.sh
        owner={{ add_user }} mode=755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: run download-flannel
  command: ~/download-flannel.sh owner={{ add_user }}
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-flannel
  file: name=~/download-flannel.sh owner={{ add_user }} state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: copy flanneld
  copy: src=flanneld dest=/usr/bin/flanneld owner=root mode=0755
  when: "'kube-nodes' in group_names"

- name: copy flanneld.service
  template: src=flanneld.service.j2 dest=/lib/systemd/system/flanneld.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: resize MTU
  command: ip link set dev {{ hostvars[item][item + '-iface'] }} mtu {{ mtu_size }}
  with_items: groups['kube-nodes']
  when: "'kube-nodes' in group_names"

- name: copy flanneld config
  template: src=flanneld.j2 dest=/etc/default/flanneld
  when: "'kube-nodes' in group_names"

- name: reload systemd
  shell: systemctl daemon-reload
  when: "'kube-nodes' in group_names"

- name: restart flannel
  service: name=flanneld state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart flannel
  notify:
    - restart flannel



## Handlers are invoked through notify from the tasks
$ mkdir -p roles/flannel/handlers && vi roles/flannel/handlers/main.yml
---
- name: restart flannel
  service: name=flanneld state=restarted
  notify:
    - stop docker
    - delete docker0
    - copy docker-config
    - run docker-config
    - remove docker-config
    - start docker
  when: "'kube-nodes' in group_names"

- name: stop docker
  service: name=docker state=stopped
  when: "'kube-nodes' in group_names"

- name: delete docker0
  command: ip link delete docker0
  ignore_errors: yes
  when: "'kube-nodes' in group_names"

- name: copy docker-config
  template: src=docker-config.sh.j2 dest=~/docker-config.sh mode=755
  when: "'kube-nodes' in group_names"

- name: run docker-config
  command: ~/docker-config.sh
  ignore_errors: true
  when: "'kube-nodes' in group_names"

- name: remove docker-config
  file: name=~/docker-config.sh state=absent
  when: "'kube-nodes' in group_names"

- name: start docker
  service: name=docker state=started
  when: "'kube-nodes' in group_names"



$ ansible-playbook -i hosts 03-flannel.yml 
 






###################################
## Download and make the k8s source (ansible)
###################################
## Create the cert files
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-cert.sh
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh

## Download and install Kubernetes
$ vi 04-kubernetes.yml
---
- name: Setup kubernetes
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - kubernetes



## Script that downloads and builds k8s (GOPATH and PATH are important here)
$ mkdir -p roles/kubernetes/templates && vi roles/kubernetes/templates/download-kubernetes.sh.j2
#!/bin/bash

GO_HOME=${HOME}/{{ gopath_dir }}
KUBE_HOME=${GO_HOME}/src/k8s.io/kubernetes
KUBE_VERSION="{{ kube_version }}"
ANSIBLE_HOME=${HOME}/{{ ansible_dir }}

export GOPATH=${GO_HOME}:${KUBE_HOME}/Godeps/_workspace
export PATH=${GO_HOME}/bin:$PATH

function chdir() {
    cd $1
}

if [ ! -d ${KUBE_HOME} ]; then
    mkdir -p ${GO_HOME}/src/k8s.io
    chdir ${GO_HOME}/src/k8s.io
    go get -u github.com/jteeuwen/go-bindata/go-bindata
    git clone https://github.com/kubernetes/kubernetes.git
    chdir ${KUBE_HOME}
    git checkout -b ${KUBE_VERSION} origin/${KUBE_VERSION}
fi

chdir ${KUBE_HOME}
if [ ! -d ${KUBE_HOME}/_output ]; then
    make all
fi

mkdir -p ${ANSIBLE_HOME}/roles/kubernetes/files
cp _output/local/bin/linux/amd64/kube* ${ANSIBLE_HOME}/roles/kubernetes/files


## Kubernetes config files
$ vi roles/kubernetes/templates/kube-apiserver.conf.j2
KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=http://127.0.0.1:4001 \
--logtostderr=true \
--service-cluster-ip-range={{ service_cluster_ip_range }} \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,\
ResourceQuota,DenyEscalatingExec,SecurityContextDeny \
--service-node-port-range={{ service_node_port_range }} \
--client-ca-file=/srv/kubernetes/ca.crt \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key"


$ vi roles/kubernetes/templates/kube-apiserver.service.j2
[Unit]
Description=Kubernetes API Server
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-apiserver
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


$ vi roles/kubernetes/templates/kube-controller-manager.conf.j2
KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
--root-ca-file=/srv/kubernetes/ca.crt \
--service-account-private-key-file=/srv/kubernetes/server.key \
--logtostderr=true"


$ vi roles/kubernetes/templates/kube-controller-manager.service.j2
[Unit]
Description=Kubernetes Controller Manager
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



$ vi roles/kubernetes/templates/kube-scheduler.conf.j2
KUBE_SCHEDULER_OPTS="--logtostderr=true \
--master=127.0.0.1:8080"


$ vi roles/kubernetes/templates/kube-scheduler.service.j2
[Unit]
Description=Kubernetes Scheduler
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-scheduler
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



$ vi roles/kubernetes/templates/kubelet.conf.j2
KUBELET_OPTS="--address=0.0.0.0 \
--port=10250 \
--hostname-override={{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }} \
--api-servers=http://{{ kube_api_ip }}:8080 \
--logtostderr=true \
--cluster-dns={{ cluster_dns }} \
--cluster-domain={{ cluster_domain }}"


$ vi roles/kubernetes/templates/kubelet.service.j2
[Unit]
Description=Kubernetes Kubelet
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kubelet
ExecStart=/usr/local/bin/kubelet $KUBELET_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


$ vi roles/kubernetes/templates/kube-proxy.conf.j2
KUBE_PROXY_OPTS="--master=http://{{ kube_api_ip }}:8080 --logtostderr=true"


$ vi roles/kubernetes/templates/kube-proxy.service.j2
[Unit]
Description=Kubernetes Proxy Server
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-proxy
ExecStart=/usr/local/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



## Ansible tasks that install Kubernetes
$ mkdir -p roles/kubernetes/tasks && vi roles/kubernetes/tasks/main.yml
---
- name: copy download-kubernetes.sh
  template: src=download-kubernetes.sh.j2 dest=~/download-kubernetes.sh
        owner={{ add_user }} mode=755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: run download-kubernetes.sh
  command: ~/download-kubernetes.sh owner={{ add_user }}
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-kubernetes.sh
  file: name=~/download-kubernetes.sh owner={{ add_user }} state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: add kube-cert group
  group: name=kube-cert state=present

- name: make cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-cert.sh
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: make ca-cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-ca-cert.sh {{ kube_api_ip }}
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: change mod cert
  file:
    path: /srv/kubernetes
    mode: 0755
    recurse: yes
  when: "'kube-deploy' in group_names"

- name: create cert directory
  file: path=/srv/kubernetes state=directory owner=root mode=0755

- name: copy server.cert
  copy: src=/srv/kubernetes/server.cert dest=/srv/kubernetes/server.cert
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy server.key
  copy: src=/srv/kubernetes/server.key dest=/srv/kubernetes/server.key
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy ca.crt
  copy: src=/srv/kubernetes/ca.crt dest=/srv/kubernetes/ca.crt
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy kubectl
  copy: src=kubectl dest=/usr/local/bin/kubectl
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver
  copy: src=kube-apiserver dest=/usr/local/bin/kube-apiserver
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager
  copy: src=kube-controller-manager dest=/usr/local/bin/kube-controller-manager
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler
  copy: src=kube-scheduler dest=/usr/local/bin/kube-scheduler
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kubelet
  copy: src=kubelet dest=/usr/local/bin/kubelet
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy
  copy: src=kube-proxy dest=/usr/local/bin/kube-proxy
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-apiserver config
  template: src=kube-apiserver.conf.j2 dest=/etc/default/kube-apiserver
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver.service
  template: src=kube-apiserver.service.j2 dest=/lib/systemd/system/kube-apiserver.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager config
  template: src=kube-controller-manager.conf.j2 dest=/etc/default/kube-controller-manager
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager.service
  template: src=kube-controller-manager.service.j2 dest=/lib/systemd/system/kube-controller-manager.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler config
  template: src=kube-scheduler.conf.j2 dest=/etc/default/kube-scheduler
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler.service
  template: src=kube-scheduler.service.j2 dest=/lib/systemd/system/kube-scheduler.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kubelet config
  template: src=kubelet.conf.j2 dest=/etc/default/kubelet
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kubelet.service
  template: src=kubelet.service.j2 dest=/lib/systemd/system/kubelet.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy config
  template: src=kube-proxy.conf.j2 dest=/etc/default/kube-proxy
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy.service
  template: src=kube-proxy.service.j2 dest=/lib/systemd/system/kube-proxy.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: reload systemd
  shell: systemctl daemon-reload
  when: "'kube-masters' in group_names"

- name: restart kube-apiserver
  service: name=kube-apiserver state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-apiserver

- name: restart kube-controller-manager
  service: name=kube-controller-manager state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-controller-manager

- name: restart kube-scheduler
  service: name=kube-scheduler state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-scheduler

- name: restart kubelet
  service: name=kubelet state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kubelet

- name: restart kube-proxy
  service: name=kube-proxy state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kube-proxy




$ ansible-playbook -i hosts 04-kubernetes.yml






















## Must be run as the root account


# apt-get update
# apt-get install -y gcc make
# apt-get install -y python-pip python-dev python3-dev libevent-dev \
                            vlan libvirt-bin bridge-utils lvm2 openvswitch-switch \
                            python-libvirt nbd-client ipset ntp python-lzma \
                            p7zip-full arping qemu-kvm

# apt-get install -y python-tox libmysqlclient-dev libpq-dev \
                           libxml2-dev libxslt1-dev libvirt-dev libffi-dev

# apt-get install -y virtinst libsemanage1-dev python-semanage \
                            attr policycoreutils


## Install Avocado
# cd ~
# mkdir avocado && cd avocado
# git clone git://github.com/avocado-framework/avocado.git
# cd avocado
# make requirements
# python setup.py install


## Install the Avocado plugins (avocado-vt)
# cd ~/avocado
# cd avocado
# make requirements-plugins
# make link


# vi ~/.config/avocado/avocado.conf
[datadir.paths]
base_dir = /root/avocado/avocado
test_dir = /root/avocado/avocado/examples/tests
data_dir = /usr/share/avocado/data
logs_dir = /root/avocado/avocado/job-results



## Bootstrapping Avocado-VT (vt-type : qemu, libvirt .....)
# ./scripts/avocado vt-bootstrap --vt-type libvirt



## List the Avocado plugins
# ./scripts/avocado plugins


## List the tests for each vt-type (vt-type : qemu, libvirt .....)
# ./scripts/avocado list --vt-type libvirt --verbose


## Run a single libvirt test case
# ./scripts/avocado run type_specific.io-github-autotest-qemu.driver_load.with_balloon


## View the results: the HTML report is generated at
## /root/avocado/avocado/job-results/job-2016-08-31T09.17-1daa785/html/results.html


## Run the full test suite
# ./scripts/avocado run type_specific









