
In an Ansible playbook with many roles, such as Kubespray, it is not easy to see which variables are declared and which values are actually assigned to them. Ansible lets you inspect these variable values, in particular the hostvars.

First, the inventory file hosts.ini shows which hosts and groups are defined.

$ vi inventory/k1-seungkyua/hosts.ini
[all]
k1-master01 ip=192.168.30.13
k1-master02 ip=192.168.30.14
k1-master03 ip=192.168.30.15
k1-node01 ip=192.168.30.12
k1-node02 ip=192.168.30.17
k1-node03 ip=192.168.30.18
k1-node04 ip=192.168.30.21
k1-node05 ip=192.168.30.20

[etcd]
k1-master01
k1-master02
k1-master03

[kube-master]
k1-master01
k1-master02
k1-master03

[kube-node]
k1-node01
k1-node02
k1-node03
k1-node04
k1-node05

[k8s-cluster:children]
kube-master
kube-node

There are several groups, and the last one, k8s-cluster, has kube-master and kube-node as children, so the k8s-cluster group covers every node.
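To double-check which hosts actually fall into a group, the same inventory can be queried with the --list-hosts option (a convenience command, not part of the original run):

$ ansible -i inventory/k1-seungkyua/hosts.ini k8s-cluster --list-hosts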

Now the ansible command below shows the variables applied to a single host, "k1-master01".
 
$ ansible -i inventory/k1-seungkyua/hosts.ini -m debug -a "var=hostvars['k1-master01']" k8s-cluster

The result looks like this.

k1-node01 | SUCCESS => {
    "hostvars['k1-master01']": {
        "admin_token": "QVFBTmRVVlh5UmZoTlJBQTMyZTh6Qk5uajV1VElrMDJEbWFwWmc9PQ==",
        "ansible_all_ipv4_addresses": [
            "192.168.230.13",
            "192.168.30.13",
            "172.16.31.64",
            "172.17.0.1",
            "10.233.0.1",
            "10.233.0.3",
            "10.233.55.72",
            "10.233.18.191"
        ],
        "ansible_all_ipv6_addresses": [
            "fe80::3ea8:2aff:fe1c:cfd4",
            "fe80::5eb9:1ff:fe8c:67dc",
            "fe80::42:98ff:fec4:7ad1",
            "fe80::ecee:eeff:feee:eeee",
            "fe80::ecee:eeff:feee:eeee"
        ],
        "ansible_apparmor": {
            "status": "enabled"
        },
        "ansible_architecture": "x86_64",
        "ansible_bios_date": "05/06/2015",
        "ansible_bios_version": "P89",
        "ansible_cali1845c564081": {
            "active": true,
            "device": "cali1845c564081",
            "features": {
                "busy_poll": "off [fixed]",
                "fcoe_mtu": "off [fixed]",
                "generic_receive_offload": "on",
                "generic_segmentation_offload": "on",
                "highdma": "on",
                "hw_tc_offload": "off [fixed]",
                "l2_fwd_offload": "off [fixed]",
                "large_receive_offload": "off [fixed]",
                "loopback": "off [fixed]",
                "netns_local": "off [fixed]",
                "ntuple_filters": "off [fixed]",
                "receive_hashing": "off [fixed]",
                "rx_all": "off [fixed]",
                "rx_checksumming": "on",
                "rx_fcs": "off [fixed]",
                "rx_vlan_filter": "off [fixed]",
                "rx_vlan_offload": "on",
                "rx_vlan_stag_filter": "off [fixed]",
                "rx_vlan_stag_hw_parse": "on",
                "scatter_gather": "on",
                "tcp_segmentation_offload": "on",
                "tx_checksum_fcoe_crc": "off [fixed]",
                "tx_checksum_ip_generic": "on",
                "tx_checksum_ipv4": "off [fixed]",
                "tx_checksum_ipv6": "off [fixed]",
                "tx_checksum_sctp": "off [fixed]",
                "tx_checksumming": "on",
                "tx_fcoe_segmentation": "off [fixed]",
                "tx_gre_segmentation": "on",
                "tx_gso_robust": "off [fixed]",
                "tx_ipip_segmentation": "on",
                "tx_lockless": "on [fixed]",
                "tx_nocache_copy": "off",
                "tx_scatter_gather": "on",
                "tx_scatter_gather_fraglist": "on",
                "tx_sit_segmentation": "on",
                "tx_tcp6_segmentation": "on",
                "tx_tcp_ecn_segmentation": "on",
                "tx_tcp_segmentation": "on",
                "tx_udp_tnl_segmentation": "on",
                "tx_vlan_offload": "on",
                "tx_vlan_stag_hw_insert": "on",
                "udp_fragmentation_offload": "on",
                "vlan_challenged": "off [fixed]"
            },
            "hw_timestamp_filters": [],
            "ipv6": [
                {
                    "address": "fe80::ecee:eeff:feee:eeee",
                    "prefix": "64",
                    "scope": "link"
                }
            ],
            "macaddress": "ee:ee:ee:ee:ee:ee",
            "mtu": 1500,
            "promisc": false,
            "speed": 10000,
            "timestamping": [
                "rx_software",
                "software"
            ],
            "type": "ether"
        },
        "ansible_uptime_seconds": 46351496,
        "ansible_user_dir": "/root",
        "ansible_user_gecos": "root",
        "ansible_user_gid": 0,
        "ansible_user_id": "root",
        "ansible_user_shell": "/bin/bash",
        "ansible_user_uid": 0,
        "ansible_userspace_architecture": "x86_64",
        "ansible_userspace_bits": "64",
        "ansible_verbosity": 0,
        "ansible_version": {
            "full": "2.7.7",
            "major": 2,
            "minor": 7,
            "revision": 7,
            "string": "2.7.7"
        },
        "ansible_virtualization_role": "host",
        "ansible_virtualization_type": "kvm",
        "calico_ip_auto_method": "can-reach=8.8.8.8",
        "ceph_version": "mimic",
        "dashboard_enabled": true,
        "docker_insecure_registries": [
            "tacorepo:5000"
        ],
        "gather_subset": [
            "all"
        ],
        "global_as_num": "65000",
        "group_names": [
            "etcd",
            "k8s-cluster",
            "kube-master"
        ],
        "groups": {
            "all": [
                "k1-master01",
                "k1-master02",
                "k1-master03",
                "k1-node01",
                "k1-node02",
                "k1-node03",
                "k1-node04",
                "k1-node05"
            ],
            "etcd": [
                "k1-master01",
                "k1-master02",
                "k1-master03"
            ],
            "k8s-cluster": [
                "k1-node01",
                "k1-node02",
                "k1-node03",
                "k1-node04",
                "k1-node05",
                "k1-master01",
                "k1-master02",
                "k1-master03"
            ],
            "kube-master": [
                "k1-master01",
                "k1-master02",
                "k1-master03"
            ],
            "kube-node": [
                "k1-node01",
                "k1-node02",
                "k1-node03",
                "k1-node04",
                "k1-node05"
            ],
            "ungrouped": []
        },
        "helm_enabled": true,
        "ingress_nginx_enabled": true,
        "ingress_nginx_host_network": true,
        "ingress_nginx_node_count": 1,
        "ingress_nginx_nodeselector": {
            "node-role.kubernetes.io/ingress": "true"
        },
        "inventory_dir": "/home/seungkyua/deploy/my-kubespray/inventory/k1-seungkyua",
        "inventory_file": "/home/seungkyua/deploy/my-kubespray/inventory/k1-seungkyua/hosts.ini",
        "inventory_hostname": "k1-master01",
        "inventory_hostname_short": "k1-master01",
        "ip": "192.168.30.13",
        "ipip_mode": "Never",
        "kubeadm_enabled": true,
        "kubeconfig_localhost": true,
        "kubectl_localhost": true,
        "local_volume_provisioner_enabled": true,
        "module_setup": true,
        "monitors": "192.168.30.23:6789,192.168.30.24:6789,192.168.30.25:6789",
        "nat_outgoing": true,
        "omit": "__omit_place_holder__4491a572b322dda2550f5ce4d8005946f456f738",
        "override_system_hostname": false,
        "peer_with_router": true,
        "peers": [
            {
                "as": "65000",
                "router_id": "192.168.30.1"
            }
        ],
        "playbook_dir": "/home/seungkyua/deploy/my-kubespray",
        "pool_name": "kubes",
        "populate_inventory_to_hosts_file": false,
        "storageclass_name": "rbd",
        "user_id": "kube",
        "user_secret_namespace": "openstack",
        "user_token": "QVFDdC9CcFlpZ0o3TVJBQTV2eStjbDM5RXNLcFkzQyt0WEVHckE9PQ=="
    }
}




To get the key of a variable, you can take a substring with the regex_replace filter, but you can also use set_fact.

The example below uses set_fact.

The example shows how to set the node label on which the ingress controller will run.
A node label can be set with the following command:
$ kubectl label --overwrite node k2-ctrl01 node-role.kubernetes.io/ingress=true
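Once the label is applied, it can be verified with standard kubectl queries (added here for reference):
$ kubectl get nodes -l node-role.kubernetes.io/ingress=true
$ kubectl get node k2-ctrl01 --show-labels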


First, defaults/main.yml, which defines the variables, looks like this:
ingress_nginx_nodeselector:
  node-role.kubernetes.io/ingress: "true"
ingress_nginx_node_count: 1

The dict value of ingress_nginx_nodeselector is node-role.kubernetes.io/ingress: "true", but node-role.kubernetes.io/ingress is the key and it may change, so it is hard to reference the key directly.

To handle the key dynamically, use set_fact:
- name: Set ingress nginx node role
  set_fact:
    ingress_nginx_node_role:
      key: "{{ item.key }}"
      value: "{{ item.value }}"
  with_dict: "{{ ingress_nginx_nodeselector }}"
  when:
    - inventory_hostname == groups['kube-master'][0]

When this task runs, the ingress_nginx_node_role variable holds {"key" : "node-role.kubernetes.io/ingress", "value" : "true"}.
The key can now easily be retrieved as ingress_nginx_node_role.key.
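To confirm what was stored, a simple debug task (a hypothetical verification step, not part of the role) could print the fact:

- name: Show ingress nginx node role
  debug:
    msg: "{{ ingress_nginx_node_role.key }}={{ ingress_nginx_node_role.value }}"
  when:
    - inventory_hostname == groups['kube-master'][0]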

- name: Set ingress nginx node list
  set_fact:
    ingress_nginx_node_list:
      - "{{ groups['kube-node'][item|int] }}"
  with_sequence: start=0 end={{ (ingress_nginx_node_count-1)|int }}
  when:
    - inventory_hostname == groups['kube-master'][0]

- name: NGINX Ingress Controller | Set node label
  shell: |
    {{ bin_dir }}/kubectl label --overwrite node {{ item }} {{ ingress_nginx_node_role.key }}={{ ingress_nginx_node_role.value }}
  loop: "{{ ingress_nginx_node_list }}"
  when:
    - inventory_hostname == groups['kube-master'][0]

Because the nodes have to be selected before the label is set, the "Set ingress nginx node list" task runs first to pick the nodes, and the "NGINX Ingress Controller | Set node label" task then applies the node label.



 





## Reference commands for writing Ansible scripts

## View variables

$ ansible -i hosts -m debug -a "var=hostvars['kube-master01']" localhost
$ ansible -i hosts -m debug -a "var=groups['kube-masters']" localhost

## Start the playbook at a specific task (restart flannel)
$ ansible-playbook -i hosts --start-at-task='restart flannel' 03-flannel.yml

## List tasks
$ ansible-playbook -i hosts 03-flannel.yml --list-tasks
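## The playbooks below also rely on tags (e.g. "restart flannel"); the available tags can be listed the same way (added as a convenience)
$ ansible-playbook -i hosts 03-flannel.yml --list-tags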


## All the sources are on GitHub ...



[ Run on every node ]

$ sudo su -
# vi /etc/ssh/sshd_config
28 PermitRootLogin yes
52 PasswordAuthentication yes

# systemctl restart sshd
# passwd
# passwd stack


$ sudo apt-get install python2.7 python-minimal

$ sudo apt-get update
$ sudo apt-get install -y ansible

## Install the ntp and ntpdate packages and configure NTP
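## For example (assumed commands for Ubuntu 16.04; point ntp at your own time servers if needed)
$ sudo apt-get install -y ntp ntpdate
$ sudo systemctl enable ntp && sudo systemctl restart ntp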




[ Initialize git and push to GitHub ]
$ cd ~/kubernetes-ansible
$ git init
$ git remote add origin https://github.com/seungkyua/kubernetes-ansible.git
$ git pull origin master
$ git config user.name "Seungkyu Ahn"
$ git config user.email "seungkyua@gmail.com"
$ git add -A
$ git commit -a -m "Initial commit"
$ git push --set-upstream origin master




[ Prerequisite ]
$ vi README.md
# Prerequisite #

 - This ansible-playbook is tested on Ubuntu 16.04 LTS
 - One Kubernetes deploy node (a.k.a. kube-deploy) is required
 - The `root` user must be able to log in from kube-deploy to every Kubernetes node by hostname without a password
 - kube-deploy should have more than 2 GB of swap memory
 - All work should be executed as the `stack` user on kube-deploy
 - Every node should have the ansible and python packages installed

```
$ sudo apt-get update
$ sudo apt-get install -y ansible python2.7 python-minimal
```

 - Group names and node names cannot be changed in the `hosts` file

```
[kube-deploy]
kube-deploy

[kube-masters]
kube-master01
kube-master02

[kube-nodes]
kube-node01
kube-node02

[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14

[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16
```

 - You have to set your own password in the `add_user_password` field of the `group_vars/all` file


## Tips ##

An encrypted password can be generated with the following command

```
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512
[ input password and press enter ]
```


## Execute order ##

```
$ sudo ansible-playbook -i hosts 00-create-user.yml
$ sudo ansible-playbook -i hosts 00-install-package.yml
$ sudo ansible-playbook -i hosts 01-install-docker.yml
$ sudo chown -R stack.stack ~/.ansible
$ ansible-playbook -i hosts 02-etcd.yml
$ ansible-playbook -i hosts 03-flannel.yml
$ ansible-playbook -i hosts 04-kubernetes.yml
```


## restart service ##

 - restart docker

```
$ sudo ansible-playbook -i hosts --tags="restart docker" 01-install-docker.yml
```

 - restart etcd

```
$ ansible-playbook -i hosts --tags="restart etcd" 02-etcd.yml
```

 - restart flannel

```
$ ansible-playbook -i hosts --tags="restart flannel" 03-flannel.yml
```

 - restart kubernetes

```
$ ansible-playbook -i hosts --tags="restart kube-apiserver,restart kube-controller-manager,restart kube-scheduler,restart kube-proxy,restart kubelet" 04-kubernetes.yml
```



[ Run on the kube-deploy node ]
## Connect to the kube-deploy node
$ ssh -i ~/magnum-key.pem stack@192.168.30.138
$ ssh-keygen -t rsa
$ sudo su -
# ssh-keygen -t rsa
# ssh-copy-id kube-deploy
# ssh-copy-id kube-master01
# ssh-copy-id kube-master02
# ssh-copy-id kube-node01
# ssh-copy-id kube-node02


$ mkdir -p ~/kubernetes-ansible && cd ~/kubernetes-ansible
$ vi hosts
[kube-deploy]
kube-deploy

[kube-masters]
kube-master01
kube-master02

[kube-nodes]
kube-node01
kube-node02

[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14

[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16


$ vi ansible.cfg
[defaults]
host_key_checking = False



## Generate the encrypted password value
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512



## Set the variables
$ mkdir -p group_vars && vi group_vars/all
ansible_dir: "kubernetes-ansible"
gopath_dir: "go_workspace"
add_user: "stack"
add_user_password: "generated password here !"
ubuntu_release: "xenial"
kube_deploy_uname_r: "4.4.0-22-generic"
uname_r: "4.4.0-21-generic"
etcd_data_dir: "/ext/data/etcd"
flannel_version: "v0.6.1"
flannel_net: "172.16.0.0/16"
mtu_size: "1500"
kube_version: "release-1.4"
kube_api_ip: "192.168.30.13"
service_cluster_ip_range: "192.168.30.192/26"
service_node_port_range: "30000-32767"
cluster_dns: "192.168.30.200"
cluster_domain: "cluster.local" 
 



$ mkdir -p files && vi files/hosts
127.0.0.1       localhost

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts


10.0.0.18           kube-depoly
192.168.30.138  kube-deploy
192.168.30.13    kube-master01
192.168.30.14    kube-master02
192.168.30.15    kube-node01
192.168.30.16    kube-node02




## Create the user (stack), copy the ssh key, register as a sudo user, set environment variables, copy the hosts file
$ vi 00-create-user.yml
---
- name: create the user
  hosts: all
  remote_user: root

  tasks:
    - include_vars: group_vars/all

    - name: Add the {{ add_user }} user
      user: name={{ add_user }} groups=sudo createhome=yes shell=/bin/bash
            password={{ add_user_password }} append=yes
            generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa

    - name: Set up authorized_keys for the {{ add_user }}
      authorized_key: user={{ add_user }} key="{{ lookup('file', '/home/{{ add_user }}/.ssh/id_rsa.pub') }}"

    - name: sudo 
      lineinfile:
        "dest=/etc/sudoers state=present regexp='^{{ add_user }} ALL='
         line='{{ add_user }} ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'"

    - name: export GOPATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export GOPATH' line='export GOPATH=$HOME/{{ gopath_dir }}:$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes/Godeps/_workspace'"

    - name: export PATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export PATH'
         line='export PATH=$HOME/{{ gopath_dir }}/bin:$PATH'"

    - name: export KUBE_ROOT
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export KUBE_ROOT'
         line='export KUBE_ROOT=$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes'"

    - name: Copy hosts file
      copy:
        src: "files/hosts"
        dest: "/etc"
        owner: root


$ sudo ansible-playbook -i hosts 00-create-user.yml




## Install apt packages
$ vi 00-install-package.yml
---
- name: install package
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: root

  tasks:
    - include_vars: group_vars/all

    - name: Install apt packages
      apt: name={{ item }}  update_cache=yes
      with_items:
        - bridge-utils
        - linux-libc-dev
        - golang
        - gcc
        - curl
        - git


$ sudo ansible-playbook -i hosts 00-install-package.yml



## docker install
$ vi 01-install-docker.yml
---
# This playbook sets up the docker package

- hosts: kube-deploy kube-masters kube-nodes
  remote_user: root

  roles:
    - docker




$ mkdir -p roles/docker/files && vi roles/docker/files/docker.xenial.list
deb https://apt.dockerproject.org/repo ubuntu-xenial main


$ vi roles/docker/files/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/etc/default/docker
ExecStart=/usr/bin/docker daemon $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target



$ vi roles/docker/files/docker
DOCKER_OPTS=" -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock"



$ mkdir -p roles/docker/tasks && vi roles/docker/tasks/main.yml
---
- name: install apt-transport-https ca-certificates
  apt: name={{ item }}
  with_items:
    - apt-transport-https
    - ca-certificates

- name: add GPG key
  apt_key: keyserver=hkp://p80.pool.sks-keyservers.net:80 \
           id=58118E89F3A912897C070ADBF76221572C52609D

- name: add docker.list
  copy:
    src: "docker.{{ ubuntu_release }}.list"
    dest: "/etc/apt/sources.list.d"
    owner: root

- name: apt-get update
  apt: update_cache=yes

- name: install linux-image-extra kube-deploy
  apt: name=linux-image-extra-{{ kube_deploy_uname_r }}
  when: "'kube-deploy' in group_names"

- name: install linux-image-extra kube-masters kube-nodes
  apt: name=linux-image-extra-{{ uname_r }}
  when: "('kube-masters' in group_names) or ('kube-nodes' in group_names)"

#- name: restart servers
#  shell: sleep 2 && shutdown -r now
#  async: 0
#  poll: 0
#  ignore_errors: true

#- name: Waiting for server to come back
#  local_action: wait_for host={{ inventory_hostname }} \
#                state=started port=22 delay=10 timeout=300

#- name: Update apt
#  apt: update_cache=yes

- name: install docker
  apt: name=docker-engine

- name: add docker group
  user: name={{ add_user }} group=docker

- name: copy docker config
  copy:
    src: "docker"
    dest: "/etc/default"
    mode: 0755
    owner: root

- name: copy docker.service
  copy:
    src: "docker.service"
    dest: "/lib/systemd/system"
    mode: 0644
    owner: root

- name: reload systemd
  shell: systemctl daemon-reload

- name: restart docker
  service: name=docker state=restarted enabled=yes
  tags:
    - restart docker

- name: export DOCKER_HOST
  lineinfile:
    "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export DOCKER_HOST'
     line='export DOCKER_HOST=127.0.0.1:4243'"


$ sudo ansible-playbook -i hosts 01-install-docker.yml
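## After the playbook finishes, you can confirm docker is listening on the TCP socket configured above (an assumed check, not part of the playbook)
$ docker -H tcp://127.0.0.1:4243 version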



[ Install etcd ]

$ sudo chown -R stack.stack ~/.ansible
$ vi 02-etcd.yml
---
# This playbook installs etcd cluster.

- name: Setup etcd
  hosts: kube-masters
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - etcd


## --listen-peer-urls and --listen-client-urls can be bound to the 0.0.0.0 address (a variant is sketched after the template below)
$ mkdir -p roles/etcd/templates && vi roles/etcd/templates/etcd.conf.j2
DAEMON_ARGS="--name {{ inventory_hostname }} \
--data-dir={{ etcd_data_dir }} \
--initial-advertise-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379,http://127.0.0.1:2379,\
http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001,http://127.0.0.1:4001 \
--advertise-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379 \
--initial-cluster-token etcd-cluster-1 \
{% for host in groups['kube-masters'] %}
{% if host == groups['kube-masters']|first %}
--initial-cluster {{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% else %},{{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% endif %}{% endfor %} \

--initial-cluster-state new"


$ mkdir -p roles/etcd/tasks && vi roles/etcd/tasks/main.yml
---
- name: install etcd
  apt: name=etcd update_cache=yes

- name: copy etcd config
  template: src=etcd.conf.j2 dest=/etc/default/etcd

- name: enable etcd systemd
  service: name=etcd enabled=yes

- name: restart etcd
  service: name=etcd state=restarted
  tags:
    - restart etcd






$ ansible-playbook -i hosts 02-etcd.yml



## To start from a specific task only
$ ansible-playbook -i hosts --start-at-task="restart etcd" 02-etcd.yml

## List the etcd members (2 members should appear)
$ etcdctl member list

## Test etcd
$ etcdctl --endpoint http://192.168.30.13:2379 set /test "hello"
$ etcdctl --endpoint http://192.168.30.14:2379 get /test
$ etcdctl --endpoint http://192.168.30.13:2379 rm /test

$ etcdctl --no-sync --endpoint http://kube-master01:2379 --debug ls / -recursive
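## Overall cluster health can also be checked with the standard etcdctl v2 command (added for reference)
$ etcdctl cluster-health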




[ Install flannel ]

## Write the flannel data into etcd and install flannel
$ vi 03-flannel.yml
---
- name: Setup flannel
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - flannel


## Script that registers the network settings for flannel in etcd
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/config-flannel.sh.j2
#!/bin/bash

exec curl -s -L http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001/v2/keys/coreos.com/network/config \
-XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'



## A rather crude approach
{% if inventory_hostname == 'kube-master01' %}
exec curl -s -L http://{{ groups['kube-masters'][0] }}:4001/v2/keys/coreos.com/network/config \
-XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
{% endif %}


## Script that downloads and builds flannel
$ vi roles/flannel/templates/download-flannel.sh.j2
#!/bin/bash

FLANNEL_DIR=${HOME}/github/flannel
FLANNEL_VERSION="{{ flannel_version }}"
ANSIBLE_HOME=${HOME}/{{ ansible_dir }}

function chdir() {
    cd $1
}

if [ ! -d ${FLANNEL_DIR} ]; then
    mkdir -p ${HOME}/github
    chdir ${HOME}/github
    git clone https://github.com/coreos/flannel.git
    chdir ${FLANNEL_DIR}
    git checkout -b ${FLANNEL_VERSION} tags/${FLANNEL_VERSION}
fi

chdir ${FLANNEL_DIR}

if [ ! -f build ]; then
cat <<EOF >build
#!/bin/bash -e

ORG_PATH="github.com/coreos"
REPO_PATH="\${ORG_PATH}/flannel"

if [ ! -h gopath/src/\${REPO_PATH} ]; then
        mkdir -p gopath/src/\${ORG_PATH}
        ln -s ../../../.. gopath/src/\${REPO_PATH} || exit 255
fi

export GOBIN=\${PWD}/bin
export GOPATH=\${PWD}/gopath

eval \$(go env)

if [ \${GOOS} = "linux" ]; then
        echo "Building flanneld..."
        go build -o \${GOBIN}/flanneld \${REPO_PATH}
else
        echo "Not on Linux - skipping flanneld build"
fi
EOF
fi

chmod 755 build
./build

mkdir -p ${ANSIBLE_HOME}/roles/flannel/files
cp bin/flanneld ${ANSIBLE_HOME}/roles/flannel/files



## flannel.service file
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/flanneld.service.j2
[Unit]
Description=flanneld Service
#After=etcd.service
#Requires=etcd.service

[Service]
EnvironmentFile=/etc/default/flanneld
PermissionsStartOnly=true
User=root
ExecStart=/usr/bin/flanneld -etcd-endpoints=${FLANNEL_ETCD} $FLANNEL_OPTIONS
Restart=always
RestartSec=10s
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
Alias=flanneld.service


## flannel config file
$ vi roles/flannel/templates/flanneld.j2
FLANNEL_ETCD="http://{{ groups['kube-masters'][0] }}:4001"
FLANNEL_OPTIONS="-v 0"


## Docker configuration script that works with flannel
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/docker-config.sh.j2
#! /bin/bash

ip link set dev docker0 down
brctl delbr docker0

source /run/flannel/subnet.env

echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 \
-H unix:///var/run/docker.sock \
--bip=${FLANNEL_SUBNET} \
--mtu=${FLANNEL_MTU}\" > /etc/default/docker
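## The script above reads /run/flannel/subnet.env, which flanneld writes on each node. Its contents typically look like the following (illustrative values; the leased subnet differs per node):
FLANNEL_NETWORK=172.16.0.0/16
FLANNEL_SUBNET=172.16.52.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false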



## Flannel install logic in Ansible
$ mkdir -p roles/flannel/tasks && vi roles/flannel/tasks/main.yml
---
# set flannel data into etcd
- name: copy config-flannel
  template: src=config-flannel.sh.j2 dest=~/config-flannel.sh mode=755
  when: inventory_hostname == 'kube-master01'

- name: run config-flannel
  command: ~/config-flannel.sh
  when: inventory_hostname == 'kube-master01'

- name: remove config-flannel
  file: name=~/config-flannel.sh state=absent
  when: inventory_hostname == 'kube-master01'

# flannel download, build, install
- name: copy download-flannel
  template: src=download-flannel.sh.j2 dest=~/download-flannel.sh
        owner={{ add_user }} mode=755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: run download-flannel
  command: ~/download-flannel.sh owner={{ add_user }}
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-flannel
  file: name=~/download-flannel.sh owner={{ add_user }} state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: copy flanneld
  copy: src=flanneld dest=/usr/bin/flanneld owner=root mode=0755
  when: "'kube-nodes' in group_names"

- name: copy flanneld.service
  template: src=flanneld.service.j2 dest=/lib/systemd/system/flanneld.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: resize MTU
  command: ip link set dev {{ hostvars[item][item + '-iface'] }} mtu {{ mtu_size }}
  with_items: "{{ groups['kube-nodes'] }}"
  when: "'kube-nodes' in group_names"

- name: copy flanneld config
  template: src=flanneld.j2 dest=/etc/default/flanneld
  when: "'kube-nodes' in group_names"

- name: reload systemd
  shell: systemctl daemon-reload
  when: "'kube-nodes' in group_names"

- name: restart flannel
  service: name=flanneld state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart flannel
  notify:
    - restart flannel



## Handlers are called via a task's notify
$ mkdir -p roles/flannel/handlers && vi roles/flannel/handlers/main.yml
---
- name: restart flannel
  service: name=flanneld state=restarted
  notify:
    - stop docker
    - delete docker0
    - copy docker-config
    - run docker-config
    - remove docker-config
    - start docker
  when: "'kube-nodes' in group_names"

- name: stop docker
  service: name=docker state=stopped
  when: "'kube-nodes' in group_names"

- name: delete docker0
  command: ip link delete docker0
  ignore_errors: yes
  when: "'kube-nodes' in group_names"

- name: copy docker-config
  template: src=docker-config.sh.j2 dest=~/docker-config.sh mode=755
  when: "'kube-nodes' in group_names"

- name: run docker-config
  command: ~/docker-config.sh
  ignore_errors: true
  when: "'kube-nodes' in group_names"

- name: remove docker-config
  file: name=~/docker-config.sh state=absent
  when: "'kube-nodes' in group_names"

- name: start docker
  service: name=docker state=started
  when: "'kube-nodes' in group_names"



$ ansible-playbook -i hosts 03-flannel.yml 
 






###################################
## Download the k8s source and run make (ansible)
###################################
## Create the cert files
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-cert.sh
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh

## Download and install kubernetes
$ vi 04-kubernetes.yml
---
- name: Setup kubernetes
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - kubernetes



## Script that downloads and builds k8s (GOPATH and PATH matter here)
$ mkdir -p roles/kubernetes/templates && vi roles/kubernetes/templates/download-kubernetes.sh.j2
#!/bin/bash

GO_HOME=${HOME}/{{ gopath_dir }}
KUBE_HOME=${GO_HOME}/src/k8s.io/kubernetes
KUBE_VERSION="{{ kube_version }}"
ANSIBLE_HOME=${HOME}/{{ ansible_dir }}

export GOPATH=${GO_HOME}:${KUBE_HOME}/Godeps/_workspace
export PATH=${GO_HOME}/bin:$PATH

function chdir() {
    cd $1
}

if [ ! -d ${KUBE_HOME} ]; then
    mkdir -p ${GO_HOME}/src/k8s.io
    chdir ${GO_HOME}/src/k8s.io
    go get -u github.com/jteeuwen/go-bindata/go-bindata
    git clone https://github.com/kubernetes/kubernetes.git
    chdir ${KUBE_HOME}
    git checkout -b ${KUBE_VERSION} origin/${KUBE_VERSION}
fi

chdir ${KUBE_HOME}
if [ ! -d ${KUBE_HOME}/_output ]; then
    make all
fi

mkdir -p ${ANSIBLE_HOME}/roles/kubernetes/files
cp _output/local/bin/linux/amd64/kube* ${ANSIBLE_HOME}/roles/kubernetes/files


## Kubernetes config files
$ vi roles/kubernetes/templates/kube-apiserver.conf.j2
KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=http://127.0.0.1:4001 \
--logtostderr=true \
--service-cluster-ip-range={{ service_cluster_ip_range }} \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,\
ResourceQuota,DenyEscalatingExec,SecurityContextDeny \
--service-node-port-range={{ service_node_port_range }} \
--client-ca-file=/srv/kubernetes/ca.crt \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key"


$ vi roles/kubernetes/templates/kube-apiserver.service.j2
[Unit]
Description=Kubernetes API Server
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-apiserver
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


$ vi roles/kubernetes/templates/kube-controller-manager.conf.j2
KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
--root-ca-file=/srv/kubernetes/ca.crt \
--service-account-private-key-file=/srv/kubernetes/server.key \
--logtostderr=true"


$ vi roles/kubernetes/templates/kube-controller-manager.service.j2
[Unit]
Description=Kubernetes Controller Manager
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



$ vi roles/kubernetes/templates/kube-scheduler.conf.j2
KUBE_SCHEDULER_OPTS="--logtostderr=true \
--master=127.0.0.1:8080"


$ vi roles/kubernetes/templates/kube-scheduler.service.j2
[Unit]
Description=Kubernetes Scheduler
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-scheduler
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



$ vi roles/kubernetes/templates/kubelet.conf.j2
KUBELET_OPTS="--address=0.0.0.0 \
--port=10250 \
--hostname-override={{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }} \
--api-servers=http://{{ kube_api_ip }}:8080 \
--logtostderr=true \
--cluster-dns={{ cluster_dns }} \
--cluster-domain={{ cluster_domain }}"


$ vi roles/kubernetes/templates/kubelet.service.j2
[Unit]
Description=Kubernetes Kubelet
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kubelet
ExecStart=/usr/local/bin/kubelet $KUBELET_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


$ vi roles/kubernetes/templates/kube-proxy.conf.j2
KUBE_PROXY_OPTS="--master=http://{{ kube_api_ip }}:8080 --logtostderr=true"


$ vi roles/kubernetes/templates/kube-proxy.service.j2
[Unit]
Description=Kubernetes Proxy Server
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-proxy
ExecStart=/usr/local/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



## Kubernetes install logic in Ansible
$ mkdir -p roles/kubernetes/tasks && vi roles/kubernetes/tasks/main.yml
---
- name: copy download-kubernetes.sh
  template: src=download-kubernetes.sh.j2 dest=~/download-kubernetes.sh
        owner={{ add_user }} mode=755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: run download-kubernetes.sh
  command: ~/download-kubernetes.sh owner={{ add_user }}
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-kubernetes.sh
  file: name=~/download-kubernetes.sh owner={{ add_user }} state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: add kube-cert group
  group: name=kube-cert state=present

- name: make cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-cert.sh
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: make ca-cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-ca-cert.sh {{ kube_api_ip }}
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: change mod cert
  file:
    path: /srv/kubernetes
    mode: 0755
    recurse: yes
  when: "'kube-deploy' in group_names"

- name: create cert directory
  file: path=/srv/kubernetes state=directory owner=root mode=0755

- name: copy server.cert
  copy: src=/srv/kubernetes/server.cert dest=/srv/kubernetes/server.cert
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy server.key
  copy: src=/srv/kubernetes/server.key dest=/srv/kubernetes/server.key
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy ca.crt
  copy: src=/srv/kubernetes/ca.crt dest=/srv/kubernetes/ca.crt
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy kubectl
  copy: src=kubectl dest=/usr/local/bin/kubectl
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver
  copy: src=kube-apiserver dest=/usr/local/bin/kube-apiserver
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager
  copy: src=kube-controller-manager dest=/usr/local/bin/kube-controller-manager
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler
  copy: src=kube-scheduler dest=/usr/local/bin/kube-scheduler
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kubelet
  copy: src=kubelet dest=/usr/local/bin/kubelet
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy
  copy: src=kube-proxy dest=/usr/local/bin/kube-proxy
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-apiserver config
  template: src=kube-apiserver.conf.j2 dest=/etc/default/kube-apiserver
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver.service
  template: src=kube-apiserver.service.j2 dest=/lib/systemd/system/kube-apiserver.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager config
  template: src=kube-controller-manager.conf.j2 dest=/etc/default/kube-controller-manager
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager.service
  template: src=kube-controller-manager.service.j2 dest=/lib/systemd/system/kube-controller-manager.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler config
  template: src=kube-scheduler.conf.j2 dest=/etc/default/kube-scheduler
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler.service
  template: src=kube-scheduler.service.j2 dest=/lib/systemd/system/kube-scheduler.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kubelet config
  template: src=kubelet.conf.j2 dest=/etc/default/kubelet
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kubelet.service
  template: src=kubelet.service.j2 dest=/lib/systemd/system/kubelet.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy config
  template: src=kube-proxy.conf.j2 dest=/etc/default/kube-proxy
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy.service
  template: src=kube-proxy.service.j2 dest=/lib/systemd/system/kube-proxy.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: reload systemd
  shell: systemctl daemon-reload
  when: "'kube-masters' in group_names"

- name: restart kube-apiserver
  service: name=kube-apiserver state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-apiserver

- name: restart kube-controller-manager
  service: name=kube-controller-manager state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-controller-manager

- name: restart kube-scheduler
  service: name=kube-scheduler state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-scheduler

- name: restart kubelet
  service: name=kubelet state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kubelet

- name: restart kube-proxy
  service: name=kube-proxy state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kube-proxy




$ ansible-playbook -i hosts 04-kubernetes.yml
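## Once the playbook completes, the cluster can be checked from a master node, where kubectl was copied above (an assumed verification step; the API server listens on the insecure port 8080 configured earlier)
$ kubectl -s http://192.168.30.13:8080 get nodes
$ kubectl -s http://192.168.30.13:8080 get componentstatuses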





















