
## Reference commands when writing Ansible scripts

## Viewing variables

$ ansible -i hosts -m debug -a "var=hostvars['kube-master01']" localhost
$ ansible -i hosts -m debug -a "var=groups['kube-masters']" localhost

## Run only a single task (restart flannel)
$ ansible-playbook -i hosts --start-at-task='restart flannel' 03-flannel.yml

## List the tasks
$ ansible-playbook -i hosts 03-flannel.yml --list-tasks


## All sources are on GitHub ...



[ Run on every node ]

$ sudo su -
# vi /etc/ssh/sshd_config
28 PermitRootLogin yes
52 PasswordAuthentication yes

# systemctl restart sshd
# passwd
# passwd stack


$ sudo apt-get install python2.7 python-minimal

$ sudo apt-get update
$ sudo apt-get install -y ansible

## Install the ntp and ntpdate packages and configure NTP




[ Initialize git and push to GitHub ]
$ cd ~/kubernetes-ansible
$ git init
$ git remote add origin https://github.com/seungkyua/kubernetes-ansible.git
$ git pull origin master
$ git config user.name "Seungkyu Ahn"
$ git config user.email "seungkyua@gmail.com"
$ git add -A
$ git commit -a -m "Initial commit"
$ git push --set-upstream origin master




[ Prerequisite ]
$ vi README.md
# Prerequisite #

 - This ansible-playbook is tested on Ubuntu 16.04 LTS
 - You need one Kubernetes deploy node (a.k.a kube-deploy)
 - Passwordless `root` SSH login from kube-deploy to all Kubernetes nodes must work using their hostnames
 - kube-deploy should have more than 2 GB of swap memory
 - All work should be executed as the `stack` user on kube-deploy
 - Ansible and Python packages should be installed on every node

```
$ sudo apt-get update
$ sudo apt-get install -y ansible python2.7 python-minimal
```

 - Group names and node names cannot be changed in the `hosts` file

```
[kube-deploy]
kube-deploy

[kube-masters]
kube-master01
kube-master02

[kube-nodes]
kube-node01
kube-node02

[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14

[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16
```

 - You have to set your own password in the `add_user_password` field of the `group_vars/all` file


## Tips ##

An encrypted password can be generated with the following command

```
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512
[ enter a password and press Enter ]
```


## Execute order ##

```
$ sudo ansible-playbook -i hosts 00-create-user.yml
$ sudo ansible-playbook -i hosts 00-install-package.yml
$ sudo ansible-playbook -i hosts 01-install-docker.yml
$ sudo chown -R stack.stack ~/.ansible
$ ansible-playbook -i hosts 02-etcd.yml
$ ansible-playbook -i hosts 03-flannel.yml
$ ansible-playbook -i hosts 04-kubernetes.yml
```


## restart service ##

 - restart docker

```
$ sudo ansible-playbook -i hosts --tags="restart docker" 01-install-docker.yml
```

 - restart etcd

```
$ ansible-playbook -i hosts --tags="restart etcd" 02-etcd.yml
```

 - restart flannel

```
$ ansible-playbook -i hosts --tags="restart flannel" 03-flannel.yml
```

 - restart kubernetes

```
$ ansible-playbook -i hosts --tags="restart kube-apiserver,restart kube-controller-manager,restart kube-scheduler,restart kube-proxy,restart kubelet" 04-kubernetes.yml
```



[ Run on the kube-deploy node ]
## Connect to the kube-deploy node
$ ssh -i ~/magnum-key.pem stack@192.168.30.138
$ ssh-keygen -t rsa
$ sudo su -
# ssh-keygen -t rsa
# ssh-copy-id kube-deploy
# ssh-copy-id kube-master01
# ssh-copy-id kube-master02
# ssh-copy-id kube-node01
# ssh-copy-id kube-node02


$ mkdir -p ~/kubernetes-ansible && cd ~/kubernetes-ansible
$ vi hosts
[kube-deploy]
kube-deploy

[kube-masters]
kube-master01
kube-master02

[kube-nodes]
kube-node01
kube-node02

[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14

[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16


$ vi ansible.cfg
[defaults]
host_key_checking = False



## Generate the encrypted password value
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512



## Set environment variables
$ mkdir -p group_vars && vi group_vars/all
ansible_dir: "kubernetes-ansible"
gopath_dir: "go_workspace"
add_user: "stack"
add_user_password: "generated password here!"
ubuntu_release: "xenial"
kube_deploy_uname_r: "4.4.0-22-generic"
uname_r: "4.4.0-21-generic"
etcd_data_dir: "/ext/data/etcd"
flannel_version: "v0.6.1"
flannel_net: "172.16.0.0/16"
mtu_size: "1500"
kube_version: "release-1.4"
kube_api_ip: "192.168.30.13"
service_cluster_ip_range: "192.168.30.192/26"
service_node_port_range: "30000-32767"
cluster_dns: "192.168.30.200"
cluster_domain: "cluster.local" 
 



$ mkdir -p files && vi files/hosts
127.0.0.1       localhost

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts


10.0.0.18           kube-deploy
192.168.30.138  kube-deploy
192.168.30.13    kube-master01
192.168.30.14    kube-master02
192.168.30.15    kube-node01
192.168.30.16    kube-node02




## Create the user (stack), copy the ssh key, register the user as a sudoer, set environment variables, copy the hosts file
$ vi 00-create-user.yml
---
- name: create the user
  hosts: all
  remote_user: root

  tasks:
    - include_vars: group_vars/all

    - name: Add the {{ add_user }} user
      user: name={{ add_user }} groups=sudo createhome=yes shell=/bin/bash
            password={{ add_user_password }} append=yes
            generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa

    - name: Set up authorized_keys for the {{ add_user }}
      authorized_key: user={{ add_user }} key="{{ lookup('file', '/home/{{ add_user }}/.ssh/id_rsa.pub') }}"

    - name: sudo 
      lineinfile:
        "dest=/etc/sudoers state=present regexp='^{{ add_user }} ALL='
         line='{{ add_user }} ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'"

    - name: export GOPATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export GOPATH' line='export GOPATH=$HOME/{{ gopath_dir }}:$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes/Godeps/_workspace'"

    - name: export PATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export PATH'
         line='export PATH=$HOME/{{ gopath_dir }}/bin:$PATH'"

    - name: export KUBE_ROOT
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export KUBE_ROOT'
         line='export KUBE_ROOT=$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes'"

    - name: Copy hosts file
      copy:
        src: "files/hosts"
        dest: "/etc"
        owner: root


sudo ansible-playbook -i hosts 00-create-user.yml




## Install apt packages
$ vi 00-install-package.yml
---
- name: install package
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: root

  tasks:
    - include_vars: group_vars/all

    - name: Install apt packages
      apt: name={{ item }}  update_cache=yes
      with_items:
        - bridge-utils
        - linux-libc-dev
        - golang
        - gcc
        - curl
        - git


sudo ansible-playbook -i hosts 00-install-package.yml



## docker install
$ vi 01-install-docker.yml
---
# This playbook setup docker package

- hosts: kube-deploy kube-masters kube-nodes
  remote_user: root

  roles:
    - docker




$ mkdir -p roles/docker/files && vi roles/docker/files/docker.xenial.list
deb https://apt.dockerproject.org/repo ubuntu-xenial main


$ vi roles/docker/files/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/etc/default/docker
ExecStart=/usr/bin/docker daemon $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target



$ vi roles/docker/files/docker
DOCKER_OPTS=" -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock"



$ mkdir -p roles/docker/tasks && vi roles/docker/tasks/main.yml
---
- name: install apt-transport-https ca-certificates
  apt: name={{ item }}
  with_items:
    - apt-transport-https
    - ca-certificates

- name: add GPG key
  apt_key: keyserver=hkp://p80.pool.sks-keyservers.net:80 \
           id=58118E89F3A912897C070ADBF76221572C52609D

- name: add docker.list
  copy:
    src: "docker.{{ ubuntu_release }}.list"
    dest: "/etc/apt/sources.list.d"
    owner: root

- name: apt-get update
  apt: update_cache=yes

- name: install linux-image-extra kube-deploy
  apt: name=linux-image-extra-{{ kube_deploy_uname_r }}
  when: "'kube-deploy' in group_names"

- name: install linux-image-extra kube-masters kube-nodes
  apt: name=linux-image-extra-{{ uname_r }}
  when: "('kube-masters' in group_names) or ('kube-nodes' in group_names)"

#- name: restart servers
#  shell: sleep 2 && shutdown -r now
#  async: 0
#  poll: 0
#  ignore_errors: true

#- name: Waiting for server to come back
#  local_action: wait_for host={{ inventory_hostname }} \
#                state=started port=22 delay=10 timeout=300

#- name: Update apt
#  apt: update_cache=yes

- name: install docker
  apt: name=docker-engine

- name: add docker group
  user: name={{ add_user }} group=docker

- name: copy docker config
  copy:
    src: "docker"
    dest: "/etc/default"
    mode: 0755
    owner: root

- name: copy docker.service
  copy:
    src: "docker.service"
    dest: "/lib/systemd/system"
    mode: 0644
    owner: root

- name: reload systemd
  shell: systemctl daemon-reload

- name: restart docker
  service: name=docker state=restarted enabled=yes
  tags:
    - restart docker

- name: export DOCKER_HOST
  lineinfile:
    "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export DOCKER_HOST'
     line='export DOCKER_HOST=127.0.0.1:4243'"


$ sudo ansible-playbook -i hosts 01-install-docker.yml



[ Installing etcd ]

sudo chown -R stack.stack ~/.ansible
$ vi 02-etcd.yml
---
# This playbook installs etcd cluster.

- name: Setup etcd
  hosts: kube-masters
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - etcd


## --listen-peer-urls and --listen-client-urls can also be bound to 0.0.0.0 (see the note after the template)
$ mkdir -p roles/etcd/templates && vi roles/etcd/templates/etcd.conf.j2
DAEMON_ARGS="--name {{ inventory_hostname }} \
--data-dir={{ etcd_data_dir }} \
--initial-advertise-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379,http://127.0.0.1:2379,\
http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001,http://127.0.0.1:4001 \
--advertise-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379 \
--initial-cluster-token etcd-cluster-1 \
{% for host in groups['kube-masters'] %}
{% if host == groups['kube-masters']|first %}
--initial-cluster {{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% else %},{{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% endif %}{% endfor %} \

--initial-cluster-state new"
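
## Note: if binding on all interfaces is acceptable, the two --listen-* flags above could instead be
## written as follows (an illustrative alternative, not what this template renders):
##   --listen-peer-urls http://0.0.0.0:2380
##   --listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001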


$ mkdir -p roles/etcd/tasks && vi roles/etcd/tasks/main.yml
---
- name: install etcd
  apt: name=etcd update_cache=yes

- name: copy etcd config
  template: src=etcd.conf.j2 dest=/etc/default/etcd

- name: enable etcd systemd
  service: name=etcd enabled=yes

- name: restart etcd
  service: name=etcd state=restarted
  tags:
    - restart etcd






$ ansible-playbook -i hosts 02-etcd.yml



## To start from a specific task only
$ ansible-playbook -i hosts --start-at-task="restart etcd" 02-etcd.yml

## List etcd members (two members should appear)
$ etcdctl member list

## Test etcd
$ etcdctl --endpoint http://192.168.30.13:2379 set /test "hello"
$ etcdctl --endpoint http://192.168.30.14:2379 get /test
$ etcdctl --endpoint http://192.168.30.13:2379 rm /test

$ etcdctl --no-sync --endpoint http://kube-master01:2379 --debug ls / -recursive




[ Installing flannel ]

## Put the flannel network configuration into etcd and install flannel
$ vi 03-flannel.yml
---
- name: Setup flannel
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - flannel


## Script that registers the flannel network settings in etcd
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/config-flannel.sh.j2
#!/bin/bash

exec curl -s -L http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001/v2/keys/coreos.com/network/config \
-XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'



## A rather crude alternative (the same call wrapped in a Jinja2 condition)
{% if inventory_hostname == 'kube-master01' %}
exec curl -s -L http://{{ groups['kube-masters'][0] }}:4001/v2/keys/coreos.com/network/config \
-XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
{% endif %}


## Script that downloads and builds flannel
$ vi roles/flannel/templates/download-flannel.sh.j2
#!/bin/bash

FLANNEL_DIR=${HOME}/github/flannel
FLANNEL_VERSION="{{ flannel_version }}"
ANSIBLE_HOME=${HOME}/{{ ansible_dir }}

function chdir() {
    cd $1
}

if [ ! -d ${FLANNEL_DIR} ]; then
    mkdir -p ${HOME}/github
    chdir ${HOME}/github
    git clone https://github.com/coreos/flannel.git
    chdir ${FLANNEL_DIR}
    git checkout -b ${FLANNEL_VERSION} tags/${FLANNEL_VERSION}
fi

chdir ${FLANNEL_DIR}

if [ ! -f build ]; then
cat <<EOF >build
#!/bin/bash -e

ORG_PATH="github.com/coreos"
REPO_PATH="\${ORG_PATH}/flannel"

if [ ! -h gopath/src/\${REPO_PATH} ]; then
        mkdir -p gopath/src/\${ORG_PATH}
        ln -s ../../../.. gopath/src/\${REPO_PATH} || exit 255
fi

export GOBIN=\${PWD}/bin
export GOPATH=\${PWD}/gopath

eval \$(go env)

if [ \${GOOS} = "linux" ]; then
        echo "Building flanneld..."
        go build -o \${GOBIN}/flanneld \${REPO_PATH}
else
        echo "Not on Linux - skipping flanneld build"
fi
EOF
fi

chmod 755 build
./build

mkdir -p ${ANSIBLE_HOME}/roles/flannel/files
cp bin/flanneld ${ANSIBLE_HOME}/roles/flannel/files



## flanneld.service file
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/flanneld.service.j2
[Unit]
Description=flanneld Service
#After=etcd.service
#Requires=etcd.service

[Service]
EnvironmentFile=/etc/default/flanneld
PermissionsStartOnly=true
User=root
ExecStart=/usr/bin/flanneld -etcd-endpoints=${FLANNEL_ETCD} $FLANNEL_OPTIONS
Restart=always
RestartSec=10s
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
Alias=flanneld.service


## flannel config file
$ vi roles/flannel/templates/flanneld.j2
FLANNEL_ETCD="http://{{ groups['kube-masters'][0] }}:4001"
FLANNEL_OPTIONS="-v 0"


## Docker configuration script tied to flannel
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/docker-config.sh.j2
#! /bin/bash

ip link set dev docker0 down
brctl delbr docker0

source /run/flannel/subnet.env

echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 \
-H unix:///var/run/docker.sock \
--bip=${FLANNEL_SUBNET} \
--mtu=${FLANNEL_MTU}\" > /etc/default/docker



## Ansible tasks that install flannel
$ mkdir -p roles/flannel/tasks && vi roles/flannel/tasks/main.yml
---
# set flannel data into etcd
- name: copy config-flannel
  template: src=config-flannel.sh.j2 dest=~/config-flannel.sh mode=755
  when: inventory_hostname == 'kube-master01'

- name: run config-flannel
  command: ~/config-flannel.sh
  when: inventory_hostname == 'kube-master01'

- name: remove config-flannel
  file: name=~/config-flannel.sh state=absent
  when: inventory_hostname == 'kube-master01'

# flannel download, build, install
- name: copy download-flannel
  template: src=download-flannel.sh.j2 dest=~/download-flannel.sh
        owner={{ add_user }} mode=755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: run download-flannel
  command: ~/download-flannel.sh owner={{ add_user }}
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-flannel
  file: name=~/download-flannel.sh owner={{ add_user }} state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: copy flanneld
  copy: src=flanneld dest=/usr/bin/flanneld owner=root mode=0755
  when: "'kube-nodes' in group_names"

- name: copy flanneld.service
  template: src=flanneld.service.j2 dest=/lib/systemd/system/flanneld.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: resize MTU
  command: ip link set dev {{ hostvars[item][item + '-iface'] }} mtu {{ mtu_size }}
  with_items: groups['kube-nodes']
  when: "'kube-nodes' in group_names"

- name: copy flanneld config
  template: src=flanneld.j2 dest=/etc/default/flanneld
  when: "'kube-nodes' in group_names"

- name: reload systemd
  shell: systemctl daemon-reload
  when: "'kube-nodes' in group_names"

- name: restart flannel
  service: name=flanneld state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart flannel
  notify:
    - restart flannel



## Handlers are invoked via a task's notify
$ mkdir -p roles/flannel/handlers && vi roles/flannel/handlers/main.yml
---
- name: restart flannel
  service: name=flanneld state=restarted
  notify:
    - stop docker
    - delete docker0
    - copy docker-config
    - run docker-config
    - remove docker-config
    - start docker
  when: "'kube-nodes' in group_names"

- name: stop docker
  service: name=docker state=stopped
  when: "'kube-nodes' in group_names"

- name: delete docker0
  command: ip link delete docker0
  ignore_errors: yes
  when: "'kube-nodes' in group_names"

- name: copy docker-config
  template: src=docker-config.sh.j2 dest=~/docker-config.sh mode=755
  when: "'kube-nodes' in group_names"

- name: run docker-config
  command: ~/docker-config.sh
  ignore_errors: true
  when: "'kube-nodes' in group_names"

- name: remove docker-config
  file: name=~/docker-config.sh state=absent
  when: "'kube-nodes' in group_names"

- name: start docker
  service: name=docker state=started
  when: "'kube-nodes' in group_names"



$ ansible-playbook -i hosts 03-flannel.yml 
 






###################################
## Download the k8s source and build it (ansible)
###################################
## Creating the cert files
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-cert.sh
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh

## Download and install Kubernetes
$ vi 04-kubernetes.yml
---
- name: Setup kubernetes
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - kubernetes



## Script that downloads and builds k8s (GOPATH and PATH matter here)
$ mkdir -p roles/kubernetes/templates && vi roles/kubernetes/templates/download-kubernetes.sh.j2
#!/bin/bash

GO_HOME=${HOME}/{{ gopath_dir }}
KUBE_HOME=${GO_HOME}/src/k8s.io/kubernetes
KUBE_VERSION="{{ kube_version }}"
ANSIBLE_HOME=${HOME}/{{ ansible_dir }}

export GOPATH=${GO_HOME}:${KUBE_HOME}/Godeps/_workspace
export PATH=${GO_HOME}/bin:$PATH

function chdir() {
    cd $1
}

if [ ! -d ${KUBE_HOME} ]; then
    mkdir -p ${GO_HOME}/src/k8s.io
    chdir ${GO_HOME}/src/k8s.io
    go get -u github.com/jteeuwen/go-bindata/go-bindata
    git clone https://github.com/kubernetes/kubernetes.git
    chdir ${KUBE_HOME}
    git checkout -b ${KUBE_VERSION} origin/${KUBE_VERSION}
fi

chdir ${KUBE_HOME}
if [ ! -d ${KUBE_HOME}/_output ]; then
    make all
fi

mkdir -p ${ANSIBLE_HOME}/roles/kubernetes/files
cp _output/local/bin/linux/amd64/kube* ${ANSIBLE_HOME}/roles/kubernetes/files


## Kubernetes config files
$ vi roles/kubernetes/templates/kube-apiserver.conf.j2
KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=http://127.0.0.1:4001 \
--logtostderr=true \
--service-cluster-ip-range={{ service_cluster_ip_range }} \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,\
ResourceQuota,DenyEscalatingExec,SecurityContextDeny \
--service-node-port-range={{ service_node_port_range }} \
--client-ca-file=/srv/kubernetes/ca.crt \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key"


$ vi roles/kubernetes/templates/kube-apiserver.service.j2
[Unit]
Description=Kubernetes API Server
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-apiserver
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


$ vi roles/kubernetes/templates/kube-controller-manager.conf.j2
KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
--root-ca-file=/srv/kubernetes/ca.crt \
--service-account-private-key-file=/srv/kubernetes/server.key \
--logtostderr=true"


$ vi roles/kubernetes/templates/kube-controller-manager.service.j2
[Unit]
Description=Kubernetes Controller Manager
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



$ vi roles/kubernetes/templates/kube-scheduler.conf.j2
KUBE_SCHEDULER_OPTS="--logtostderr=true \
--master=127.0.0.1:8080"


$ vi roles/kubernetes/templates/kube-scheduler.service.j2
[Unit]
Description=Kubernetes Scheduler
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-scheduler
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



$ vi roles/kubernetes/templates/kubelet.conf.j2
KUBELET_OPTS="--address=0.0.0.0 \
--port=10250 \
--hostname-override={{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }} \
--api-servers=http://{{ kube_api_ip }}:8080 \
--logtostderr=true \
--cluster-dns={{ cluster_dns }} \
--cluster-domain={{ cluster_domain }}"


$ vi roles/kubernetes/templates/kubelet.service.j2
[Unit]
Description=Kubernetes Kubelet
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kubelet
ExecStart=/usr/local/bin/kubelet $KUBELET_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


$ vi roles/kubernetes/templates/kube-proxy.conf.j2
KUBE_PROXY_OPTS="--master=http://{{ kube_api_ip }}:8080 --logtostderr=true"


$ vi roles/kubernetes/templates/kube-proxy.service.j2
[Unit]
Description=Kubernetes Proxy Server
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-proxy
ExecStart=/usr/local/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



## Ansible tasks that install Kubernetes
$ mkdir -p roles/kubernetes/tasks && vi roles/kubernetes/tasks/main.yml
---
- name: copy download-kubernetes.sh
  template: src=download-kubernetes.sh.j2 dest=~/download-kubernetes.sh
        owner={{ add_user }} mode=755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: run download-kubernetes.sh
  command: ~/download-kubernetes.sh owner={{ add_user }}
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-kubernetes.sh
  file: name=~/download-kubernetes.sh owner={{ add_user }} state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: add kube-cert group
  group: name=kube-cert state=present

- name: make cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-cert.sh
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: make ca-cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-ca-cert.sh {{ kube_api_ip }}
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: change mod cert
  file:
    path: /srv/kubernetes
    mode: 0755
    recurse: yes
  when: "'kube-deploy' in group_names"

- name: create cert directory
  file: path=/srv/kubernetes state=directory owner=root mode=0755

- name: copy server.cert
  copy: src=/srv/kubernetes/server.cert dest=/srv/kubernetes/server.cert
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy server.key
  copy: src=/srv/kubernetes/server.key dest=/srv/kubernetes/server.key
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy ca.crt
  copy: src=/srv/kubernetes/ca.crt dest=/srv/kubernetes/ca.crt
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy kubectl
  copy: src=kubectl dest=/usr/local/bin/kubectl
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver
  copy: src=kube-apiserver dest=/usr/local/bin/kube-apiserver
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager
  copy: src=kube-controller-manager dest=/usr/local/bin/kube-controller-manager
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler
  copy: src=kube-scheduler dest=/usr/local/bin/kube-scheduler
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kubelet
  copy: src=kubelet dest=/usr/local/bin/kubelet
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy
  copy: src=kube-proxy dest=/usr/local/bin/kube-proxy
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-apiserver config
  template: src=kube-apiserver.conf.j2 dest=/etc/default/kube-apiserver
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver.service
  template: src=kube-apiserver.service.j2 dest=/lib/systemd/system/kube-apiserver.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager config
  template: src=kube-controller-manager.conf.j2 dest=/etc/default/kube-controller-manager
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager.service
  template: src=kube-controller-manager.service.j2 dest=/lib/systemd/system/kube-controller-manager.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler config
  template: src=kube-scheduler.conf.j2 dest=/etc/default/kube-scheduler
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler.service
  template: src=kube-scheduler.service.j2 dest=/lib/systemd/system/kube-scheduler.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kubelet config
  template: src=kubelet.conf.j2 dest=/etc/default/kubelet
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kubelet.service
  template: src=kubelet.service.j2 dest=/lib/systemd/system/kubelet.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy config
  template: src=kube-proxy.conf.j2 dest=/etc/default/kube-proxy
        owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy.service
  template: src=kube-proxy.service.j2 dest=/lib/systemd/system/kube-proxy.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: reload systemd
  shell: systemctl daemon-reload
  when: "'kube-masters' in group_names"

- name: restart kube-apiserver
  service: name=kube-apiserver state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-apiserver

- name: restart kube-controller-manager
  service: name=kube-controller-manager state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-controller-manager

- name: restart kube-scheduler
  service: name=kube-scheduler state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-scheduler

- name: restart kubelet
  service: name=kubelet state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kubelet

- name: restart kube-proxy
  service: name=kube-proxy state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kube-proxy




ansible-playbook -i hosts 04-kubernetes.yml






















## Must be run as the root account


# apt-get update
# apt-get install -y gcc make
# apt-get install -y python-pip python-dev python3-dev libevent-dev \
                            vlan libvirt-bin bridge-utils lvm2 openvswitch-switch \
                            python-libvirt nbd-client ipset ntp python-lzma \
                            p7zip-full arping qemu-kvm

# apt-get install -y python-tox libmysqlclient-dev libpq-dev \
                           libxml2-dev libxslt1-dev libvirt-dev libffi-dev

# apt-get install -y virtinst libsemanage1-dev python-semanage \
                            attr policycoreutils


## Install avocado
# cd ~
# mkdir avocado && cd avocado
# git clone git://github.com/avocado-framework/avocado.git
# cd avocado
# make requirements
# python setup.py install


## Install the avocado plugin (avocado-vt)
# cd ~/avocado
# cd avocado
# make requirements-plugins
# make link


# vi ~/.config/avocado/avocado.conf
[datadir.paths]
base_dir = /root/avocado/avocado
test_dir = /root/avocado/avocado/examples/tests
data_dir = /usr/share/avocado/data
logs_dir = /root/avocado/avocado/job-results



## Bootstrapping Avocado-VT (vt-type : qemu, libvirt .....)
# ./scripts/avocado vt-bootstrap --vt-type libvirt



## List Avocado plugins
# ./scripts/avocado plugins


## List tests per vt-type (vt-type: qemu, libvirt, ...)
# ./scripts/avocado list --vt-type libvirt --verbose


## Run a single libvirt test case
# ./scripts/avocado run type_specific.io-github-autotest-qemu.driver_load.with_balloon


## View the results
# cd /root/avocado/avocado/job-results/job-2016-08-31T09.17-1daa785/\
html/results.html


## Run the full test suite
# ./scripts/avocado run type_specific











1. Python libraries

NumPy : numerical computation with vectors and matrices

SciPy : scientific computation

Matplotlib : plotting graphs

Pandas : provides R-like data frames

PIL : library for handling image data

Scikit-learn : machine learning

IPython : interactive CLI


2. Enthought Canopy : installing an integrated Python GUI analysis environment

## https://www.enthought.com/products/canopy/


## Download

## https://store.enthought.com/downloads/#default


## Download the example sources

$ cd ~/Documents/github

$ git clone https://github.com/enakai00/ml4se.git

$ cd ml4se

$ unzip ml4se.zip

$ source ./ml4se/config_mac.sh




















DPDK in docker

Container 2016. 7. 12. 01:17

[ DPDK install on Host ]

## http://dpdk.org/doc/guides/linux_gsg/build_dpdk.html

## http://dannykim.me/danny/openflow/86417

## https://github.com/bisdn/dpdk-dev/blob/master/tools/setup.sh


## Check that the High Precision Timer (HPET) is enabled

$ grep hpet /proc/timer_list


## Configure hugepages

# echo 256 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages


## Using Hugepages with the DPDK

# mkdir -p /mnt/huge

# mount -t hugetlbfs nodev /mnt/huge

# vi /etc/fstab

nodev /mnt/huge hugetlbfs defaults 0 0


## dpdk download

## http://dpdk.org/browse/dpdk/

$ mkdir -p dpdk && cd dpdk

$ curl -OL http://dpdk.org/browse/dpdk/snapshot/dpdk-16.04.tar.gz


$ tar xzvf dpdk-16.04.tar.gz

$ cd dpdk-16.04


## Installation of DPDK Target Environments

$ sudo yum update

$ sudo yum install -y sudo gcc libhugetlbfs-utils libpcap-devel \

kernel kernel-devel kernel-headers


$ make config T=x86_64-native-linuxapp-gcc

$ make


## Loading Modules to Enable Userspace IO for DPDK

$ sudo modprobe uio_pci_generic

$ sudo insmod build/kmod/igb_uio.ko

$ lsmod | grep igb_uio


## Loading VFIO Module

$ sudo modprobe vfio-pci


$ vi ~/.bashrc

export RTE_SDK=$HOME/dpdk/dpdk-16.04

export RTE_TARGET=build


$ source ~/.bashrc

$ env | grep RTE


$ cd examples/helloworld

$ make

$ cd build/app

$ sudo ./helloworld -c 3 -n 3



[ DPDK install in docker ]

## Pre-configure these on the host

## Configure hugepages

# echo 256 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages


## Using Hugepages with the DPDK

# mkdir -p /mnt/huge

# mount -t hugetlbfs nodev /mnt/huge

# vi /etc/fstab

nodev /mnt/huge hugetlbfs defaults 0 0





$ mkdir -p github

$ cd github


$ git clone https://github.com/seungkyua/dpdk-docker-helloworld.git

$ cd dpdk-docker-helloworld

$ cp -R /lib/modules .

$ cp -R /usr/src/kernels/ .





$ vi Dockerfile

FROM centos:7.2.1511

MAINTAINER Seungkyu Ahn <seungkyua@gmail.com>

ENV REFRESHED_AT 2016-07-13


LABEL docker run --rm -it --privileged \

-v /sys/bus/pci/devices:/sys/bus/pci/devices \

-v /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages \

-v /sys/devices/system/node:/sys/devices/system/node \

-v /dev:/dev \

-v /mnt/huge:/mnt/huge \

--name dpdk-docker dpdk-docker-helloworld


ENV DPDK_VERSION=16.04

ENV RTE_SDK=/root/dpdk-${DPDK_VERSION}

ENV RTE_TARGET=build


USER root


# yum update

RUN yum -y update

RUN yum install -y deltarpm gcc make libhugetlbfs-utils libpcap-devel; yum clean all

RUN yum install -y kernel kernel-devel kernel-headers; yum clean all


# dpdk download

COPY modules /lib/modules

COPY kernels /usr/src/kernels

WORKDIR /root

RUN curl -OL http://dpdk.org/browse/dpdk/snapshot/dpdk-${DPDK_VERSION}.tar.gz

RUN tar xzf dpdk-${DPDK_VERSION}.tar.gz

WORKDIR /root/dpdk-${DPDK_VERSION}

RUN make config T=x86_64-native-linuxapp-gcc

RUN make


# Loading Modules to Enable Userspace IO for DPDK

RUN modprobe uio_pci_generic

RUN modprobe vfio-pci


WORKDIR examples/helloworld

RUN make

WORKDIR build/app

ENTRYPOINT [ "./helloworld" ]

CMD [ "-c", " 3", "-n", "3" ]






$ docker build -t dpdk-docker-helloworld .


$ docker run --rm -it --privileged \

-v /sys/bus/pci/devices:/sys/bus/pci/devices \

-v /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages \

-v /sys/devices/system/node:/sys/devices/system/node \

-v /dev:/dev \

-v /mnt/huge:/mnt/huge \

--name dpdk-docker dpdk-docker-helloworld









## When an error occurs, find the providing package and install it

yum provides '*/applydeltarpm'

yum install deltarpm


## Workaround when the package error below occurs

Rpmdb checksum is invalid: dCDPT(pkg checksums): perl-HTTP-Tiny.noarch 0:0.033-3.el7 - u


$ sudo yum provides '*/perl-HTTP-Tiny'



















Installing Mesos

Container 2016. 6. 19. 22:38
[ Server list ]
mesos-master1   10.0.0.14      192.168.30.134
mesos-agent1     10.0.0.15      192.168.30.135
mesos-bootstrap 10.0.0.16      192.168.30.136
mesos-agent2     10.0.0.17      192.168.30.137



[ Download CentOS 7 and register the image in OpenStack ]
$ . ~/admin/admin-openrc
$ wget http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2

$ openstack image create \
          --file CentOS-7-x86_64-GenericCloud.qcow2 \
          --disk-format qcow2 --container-format bare \
          --public centos-7
$ openstack image list


## Create flavors
$ openstack flavor create --id mm1 --ram 32768 --disk 160 --vcpus 4 mesos.master
$ openstack flavor create --id ma1 --ram 16384 --disk 100 --vcpus 2 mesos.agent


## View quotas
$ tenant=$(openstack project list | awk '/demo/ {print $2}')
$ nova quota-show --tenant $tenant
$ cinder quota-show $tenant
$ openstack stack show $tenant


## Update quotas
$ nova quota-update --ram -1 $tenant
$ nova quota-update --cores -1 $tenant
$ nova quota-update --instances -1 $tenant
$ nova quota-update --floating-ips -1 $tenant
$ nova quota-show --tenant $tenant


$ cinder quota-update --volumes -1 $tenant
$ cinder quota-update --snapshots -1 $tenant
$ cinder quota-update --backups -1 $tenant
$ cinder quota-update --gigabytes -1 $tenant
$ cinder quota-update --backup-gigabytes -1 $tenant
$ cinder quota-show --tenant $tenant


## nova boot
$ . ~/demo/demo-openrc
$ openstack server create --image 615962cd-4905-4be8-a442-b8ca9b75e720 \
       --flavor mm1 --nic net-id=03a6de58-9693-4c41-9577-9307c8750141 \
       --key-name magnum-key --security-group default mesos-master1

$ openstack server create --image 615962cd-4905-4be8-a442-b8ca9b75e720 \
       --flavor ma1 --nic net-id=03a6de58-9693-4c41-9577-9307c8750141 \
       --key-name magnum-key --security-group default mesos-agent1

$ openstack server create --image 615962cd-4905-4be8-a442-b8ca9b75e720 \
       --flavor ma1 --nic net-id=03a6de58-9693-4c41-9577-9307c8750141 \
       --key-name magnum-key --security-group default mesos-bootstrap


## Connectivity test
$ sudo ip netns exec qrouter-68cfc511-7e75-4b85-a1ca-d8a09c489ccc \
ssh -i ~/magnum-key centos@10.0.0.14


## Create floating IPs
$ openstack ip floating create public
$ openstack ip floating list
$ openstack ip floating add 192.168.30.134 mesos-master1
$ openstack ip floating add 192.168.30.135 mesos-agent1
$ openstack ip floating add 192.168.30.136 mesos-bootstrap
$ openstack ip floating add 192.168.30.137 mesos-agent2



[ Copy ssh keys from the bootstrap node ]
$ ssh-keygen -t rsa
$ ssh-copy-id 10.0.0.14
$ ssh-copy-id 10.0.0.15
$ ssh-copy-id 10.0.0.17

$ sudo su -
# ssh-keygen -t rsa
# ssh-copy-id 10.0.0.14
# ssh-copy-id 10.0.0.15
# ssh-copy-id 10.0.0.17


[ Common to the bootstrap, master, and agent nodes ]
$ sudo yum update
$ sudo yum upgrade -y
$ sudo systemctl stop firewalld && sudo systemctl disable firewalld


## CentOS 7.2 Upgrade
$ sudo yum upgrade --assumeyes --tolerant
$ sudo yum update --assumeyes
$ uname -r
3.10.0-327.18.2.el7.x86_64


## Use OverlayFS with Docker
$ sudo tee /etc/modules-load.d/overlay.conf <<-'EOF'
overlay
EOF


## reboot
$ reboot


## Verify OverlayFS
$ lsmod | grep overlay


## Docker yum Repo
$ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/$releasever/
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF


## Docker Daemon with OverlayFS
$ sudo mkdir -p /etc/systemd/system/docker.service.d && sudo tee /etc/systemd/system/docker.service.d/override.conf <<- EOF
[Service]
ExecStart=
ExecStart=/usr/bin/docker daemon --storage-driver=overlay -H fd://
EOF


## Install Docker engine, daemon and service
$ sudo yum install --assumeyes --tolerant docker-engine
$ sudo systemctl start docker
$ sudo systemctl enable docker

## Test docker
$ sudo docker ps



[ Installing DC/OS ]
[ Bootstrap Node ]
## DC/OS installer download
$ mkdir -p installer
$ cd installer


[ Installing via the web installer ]
## start web installer (9000 port)
$ sudo bash dcos_generate_config.sh --web -v


## Connect via the web browser


## ip detect script
$ vi ip_detect.sh
#!/bin/bash

IP=$(ip addr show eth0 | awk '/inet /{print substr($2,0,9)}')
echo $IP


[ To reinstall after installing via the GUI, the following must be deleted ]
## On every cluster node
$ sudo rm -rf /opt/mesosphere

## On the bootstrap node
$ sudo rm -rf /var/lib/zookeeper



[ Installing via the CLI ]
$ cd installer
$ mkdir -p genconf && cd genconf
$ sudo vi config.yaml
---
agent_list:
- 10.0.0.15
- 10.0.0.17
#bootstrap_url: file:///opt/dcos_install_tmp
bootstrap_url: http://192.168.30.136:80
cluster_name: DCOS
exhibitor_storage_backend: static
ip_detect_filename: /genconf/ip-detect
master_discovery: static
master_list:
- 10.0.0.14
process_timeout: 10000
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
ssh_user: centos


$ vi ip-detect
#!/bin/bash

IP=$(ip addr show eth0 | awk '/inet /{print substr($2,0,9)}')
echo $IP


$ cd ..
$ sudo bash dcos_generate_config.sh


$ sudo docker run -d -p 80:80 -v $PWD/genconf/serve:/usr/share/nginx/html:ro nginx



[ Run on every master node ]
$ mkdir /tmp/dcos && cd /tmp/dcos
$ curl -O http://10.0.0.16:80/dcos_install.sh
$ sudo bash dcos_install.sh master



[ Run on every agent node ]
$ sudo yum install -y ipset unzip
$ sudo groupadd nogroup
$ mkdir /tmp/dcos && cd /tmp/dcos
$ curl -O http://10.0.0.16:80/dcos_install.sh
$ sudo bash dcos_install.sh slave


## Watch the master node installation progress
$ http://192.168.30.134:8181/exhibitor/v1/ui/index.html





[ Installing the CLI ]
## Install pip
$ curl -O https://bootstrap.pypa.io/get-pip.py
$ sudo python get-pip.py

$ sudo pip install virtualenv

$ mkdir ~/dcos && cd ~/dcos
$ curl -O https://downloads.dcos.io/dcos-cli/install.sh

$ bash install.sh . http://192.168.30.134

## To use the dcos command, run the following
$ source /home/centos/dcos/bin/env-setup


## Shows the values of the following file : ~/.dcos/dcos.toml
$ dcos config show


[ Search packages and install services ]
$ dcos package search
arangodb          0.3.0
cassandra         1.0.5-2.2.5
chronos           2.4.0
jenkins           0.2.3
kafka             1.0.9-0.10.0.0
marathon          1.1.1
spark             1.0.0-1.6.1-2
avi               16.2
calico            0.1.0
concord           0.3.16.4
confluent         1.0.3-3.0.0
crate             0.1.0
datadog           5.4.3
elasticsearch     0.7.0
etcd              0.0.3
exhibitor         1.0.0
hdfs              2.5.2-0.1.9
hue               0.0.1
kafka-manager     1.3.0.8
linkerd           0.6.0-0.1
marathon-lb       1.2.2
memsql            0.0.1
mr-redis          0.0.1
mysql             5.7.12
namerd            0.6.0-0.1
nginx             1.8.1
openvpn           0.0.0-0.1
openvpn-admin     0.0.0-0.1
quobyte           1.2.1
riak              0.1.1
ruxit             0.1
spark-notebook    0.1.0
storm             0.1.0
vamp              0.8.5
weavescope        0.15.0                                                 
weavescope-probe  0.15.0
zeppelin          0.6.0


$ dcos package install zeppelin
$ dcos package install spark











localconf

OpenStack 2016. 5. 27. 10:59

[[local|localrc]]

ADMIN_PASSWORD=secret

DATABASE_PASSWORD=$ADMIN_PASSWORD

RABBIT_PASSWORD=$ADMIN_PASSWORD

SERVICE_PASSWORD=$ADMIN_PASSWORD

HOST_IP=10.40.102.84   # change to your VM IP

# Do not use Nova-Network

disable_service n-net

# Enable Neutron

ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3

## Neutron options

Q_USE_SECGROUP=True

FLOATING_RANGE="10.40.102.0/24"

FIXED_RANGE="10.0.0.0/24"

Q_FLOATING_ALLOCATION_POOL=start=10.40.102.250,end=10.40.102.254

PUBLIC_NETWORK_GATEWAY="10.40.102.1"

PUBLIC_INTERFACE=eth0

# Open vSwitch provider networking configuration

Q_USE_PROVIDERNET_FOR_PUBLIC=True

OVS_PHYSICAL_BRIDGE=br-ex

PUBLIC_BRIDGE=br-ex

OVS_BRIDGE_MAPPINGS=public:br-ex

# Disable Identity v2

ENABLE_IDENTITY_V2=False


1. Configure log rotation

    - Prevents log files from piling up (see the sketch below)
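
    A minimal logrotate drop-in might look like the following (file name, log paths, and retention are assumptions; adjust to the deployment):

# /etc/logrotate.d/openstack (illustrative)
/var/log/nova/*.log /var/log/neutron/*.log {
    daily
    rotate 7
    compress
    missingok
    notifempty
}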


2. Configure Availability Zones and host aggregates

    - Lets VMs be scheduled efficiently (see the sketch below)
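
    For example, an aggregate tied to an availability zone can be built roughly as follows (the aggregate, AZ, host, and metadata names are hypothetical):

$ nova aggregate-create agg-ssd az-ssd
$ nova aggregate-add-host agg-ssd compute01
$ nova aggregate-set-metadata agg-ssd ssd=true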


3. Configure the CPU, memory, and disk allocation ratios

    - Take overcommit into account (see the sketch below)
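
    The ratios live in nova.conf; the values below are only an example (the usual stock defaults):

[DEFAULT]
cpu_allocation_ratio = 16.0
ram_allocation_ratio = 1.5
disk_allocation_ratio = 1.0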


4. Set inject password and inject file to false in Nova Compute

    - Makes VM boot faster (see the sketch below)
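
    A sketch of the relevant nova.conf settings (section and exact option names should be checked against your Nova release):

[libvirt]
inject_password = false
inject_key = false
inject_partition = -2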


5. Configure Cinder QoS and network QoS

    - QoS settings for storage and network minimize interference between workloads (see the sketch below)
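
    A front-end Cinder QoS spec can be created and attached to a volume type roughly like this (the name and limits are examples):

$ cinder qos-create high-iops consumer=front-end read_iops_sec=2000 write_iops_sec=1000
$ cinder qos-associate <qos_spec_id> <volume_type_id>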


6. Neutron network configuration

    - Using a provider network avoids tunneling, so it is faster


7. Configure live migration

    - The maximum downtime must be set appropriately (see the sketch below)
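
    The libvirt live-migration downtime knobs in nova.conf look roughly like this (the values below are the usual defaults, shown only as an example):

[libvirt]
live_migration_downtime = 500
live_migration_downtime_steps = 10
live_migration_downtime_delay = 75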


8. Measure how fast multiple VMs can be created concurrently from a new, uncached image

    - Fetching the image can consume all of the network bandwidth

    - Pre-create a VM from that image on every host beforehand so the image is cached


9. Verify that /var/lib/nova, where VM instance data is stored, has enough disk space



To be continued ...












Docker Training on CentOS 7

Container 2016. 4. 28. 09:41

[ VMWare Player Download ]

https://my.vmware.com/en/web/vmware/free#desktop_end_user_computing/

vmware_workstation_player/12_0


[ Download CentOS 7 ]

http://isoredirect.centos.org/centos/7/isos/x86_64/CentOS-7-x86_64-Minimal-1511.iso



[ Network configuration ]

VMnet8 192.168.75.1


https://www.lesstif.com/pages/viewpage.action?pageId=13631535

https://www.centos.org/docs/5/html/Deployment_Guide-en-US/s1-dhcp-configuring-client.html


vi /etc/sysconfig/network-scripts/ifcfg-eno16777728

TYPE=Ethernet

BOOTPROTO=none

DEFROUTE=yes

PEERDNS=yes

PEERROUTES=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_PEERDNS=yes

IPV6_PEERROUTES=yes

IPV6_FAILURE_FATAL=no

NAME=eno16777728

UUID=aa6807ce-8df6-428d-8af3-f21915570efb

DEVICE=eno16777728

ONBOOT=yes

PREFIX=24

GATEWAY=192.168.75.2

DNS1=192.168.75.2

IPADDR=192.168.75.133



service network restart



[ sudo configuration ]

stack   ALL=(ALL:ALL) NOPASSWD:ALL



[ Technical components ]

Container format : libcontainer (the native Linux container format), lxc (a generic container format)

Linux kernel namespaces : isolation between filesystems, processes, and networks

Linux kernel cgroups : CPU and memory isolation and grouping

copy-on-write (CoW) : the filesystem is created copy-on-write and is made of layers

Logging : STDOUT, STDERR, and STDIN are written to the logs

Interactive shell : a pseudo-tty is created and STDIN attached, so you communicate with the container through an interactive shell



[ Disk Type ]

AUFS, zfs, btrfs, vfs, Device-mapper, overlayfs



[ Components ]

Docker client             : DOCKER_HOST=tcp://192.168.75.133:2375

Docker server            : /etc/sysconfig/docker, /etc/sysconfig/docker-network

Docker images

Docker container



[ Container boot sequence ]

bootfs -> loaded into container memory -> bootfs unmounted -> RAM used by initrd released ->
rootfs mounted (read-only, the OS image) -> read-write filesystem mounted on top



[ Device Mapper ]

sudo yum install -y device-mapper


ls -l /sys/class/misc/device-mapper

sudo grep device-mapper /proc/devices

sudo modprobe dm_mod




[ Install prerequisite packages ]

sudo yum -y update

sudo yum -y install git tree


yum whatprovides netstat

yum -y install net-tools


 

[ Installing docker on CentOS 6 ]

$ sudo rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/

epel-release-6-8.noarch.rpm

$ sudo yum -y install lxc-docker



## Install docker

$ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'

[dockerrepo]

name=Docker Repository

baseurl=https://yum.dockerproject.org/repo/main/centos/7/

enabled=1

gpgcheck=1

gpgkey=https://yum.dockerproject.org/gpg

EOF


$ sudo yum install docker-engine


## Modify docker options

$ sudo vi /usr/lib/systemd/system/docker.service

ExecStart=/usr/bin/docker daemon -H unix:///var/run/docker.sock \

-H tcp://0.0.0.0:2375


$ sudo systemctl daemon-reload

$ sudo systemctl restart docker

$ sudo systemctl status docker


## Start automatically at boot

$ sudo systemctl enable docker


## Add the stack user to the docker group

$ sudo usermod -aG docker stack



## Apply OverlayFS to docker

$ sudo tee /etc/modules-load.d/overlay.conf <<-'EOF'

overlay

EOF



## reboot

$ sudo reboot



## Verify OverlayFS

$ lsmod | grep overlay


$ sudo mkdir -p /etc/systemd/system/docker.service.d && sudo tee /etc/systemd/system/docker.service.d/override.conf <<- EOF

[Service]

ExecStart=

ExecStart=/usr/bin/docker daemon --storage-driver=overlay -H fd:// \

-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375

EOF


$ sudo systemctl daemon-reload

$ sudo systemctl restart docker




## Client connection

## When the server option is set to tcp, connect as follows

export DOCKER_HOST=tcp://192.168.75.133:2375

$ docker ps



## With no server option given, connect via the unix socket

docker -H unix:///var/run/docker.sock ps


$ export DOCKER_HOST=unix:///var/run/docker.sock

$ docker ps


## Add it as an environment variable

$ vi ~/.bashrc

export DOCKER_HOST=tcp://192.168.75.133:2375





## docker uninstall

$ yum list installed | grep docker

$ sudo yum -y remove docker-engine.x86_64


## Also delete images and containers

$ sudo rm -rf /var/lib/docker




# After installing docker with yum, upgrade to the latest version

Refer to https://get.docker.com




## docker run, inspecting overlayfs, and choosing a storage driver

## Explanation of docker storage

http://play.joinc.co.kr/w/man/12/docker/storage


## https://docs.docker.com/engine/userguide/storagedriver/selectadriver/


## Build the Docker image

$ docker build -t example/docker-node-hello:latest .

$ docker build --no-cache -t example/docker-node-hello:latest .



## Docker run

$ docker run -d -p 8090:8080 --name node-hello example/docker-node-hello:latest


$ docker run -d -p 8090:8080 -e WHO="Seungkyu Ahn" --name node-hello \

          example/docker-node-hello:latest



## Enter the container and check the mounts information

$ docker exec -it 3c3ca0ce3470 /bin/bash


root@3c3ca0ce3470:/data/app# touch bbb.txt

root@3c3ca0ce3470:/data/app# cat /proc/mounts | grep overlay

lowerdir=/var/lib/docker/overlay/

cc4f0662e566f0ad9069abfd523ff67c38a41488aaaa06d474cb027ca64cafa2/root

upperdir=/var/lib/docker/overlay/

ede9464970bb229267c8c548f8612e801002cec2d4f524378f5acb58ccde0d98/upper

workdir=/var/lib/docker/overlay/

ede9464970bb229267c8c548f8612e801002cec2d4f524378f5acb58ccde0d98/work




## Inspect the docker volume

# cd /var/lib/docker/overlay

# cd ede9464970bb229267c8c548f8612e801002cec2d4f524378f5acb58ccde0d98

# ls -al

-rw-r--r--.  1 root root   64 Jul 11 03:22 lower-id

drwx------.  2 root root    6 Jul 11 03:22 merged

drwxr-xr-x.  9 root root 4096 Jul 11 03:26 upper

drwx------.  3 root root   17 Jul 11 03:22 work


# cat lower-id

cc4f0662e566f0ad9069abfd523ff67c38a41488aaaa06d474cb027ca64cafa2


# find . -name bbb.txt

./upper/data/app/bbb.txt



## Structure of an overlay volume

Base image volume : the root directory referenced by lower-id

Container volume : the upper directory




## Download the docker source

$ go get github.com/docker/docker


## dependency (cmd/docker/docker.go)

$ go get github.com/Sirupsen/logrus


## dependency (cmd/dockerd/daemon.go)

$ go get github.com/docker/distribution

$ go get github.com/docker/go-connections



## To become a docker contributor, refer to the URL below

## https://github.com/docker/docker/tree/master/project

## Fork the docker/docker project on GitHub


git clone https://github.com/seungkyua/docker.git docker-fork

$ cd docker-fork


git config --local user.name "Seungkyu Ahn"

$ git config --local user.email "seungkyua@gmail.com"


$ git remote add upstream https://github.com/docker/docker.git


$ git config --local -l

$ git remote -v


$ git checkout -b dry-run-test


$ git branch

* dry-run-test

  master


$ touch TEST.md


$ git add TEST.md



## The -s option automatically adds sign-off information to the commit message.

## Signed-off-by: Seungkyu Ahn <seungkyua@gmail.com>

## What should go into the commit log

## For a bug fix

fixes #xxxx,  closes #xxxx


**- What I did**

**- How I did it**

**- How to verify it**

**- Description for the changelog**

<!--

Write a short (one line) summary that describes the changes in this

pull request for inclusion in the changelog:

-->



$ git commit -s -m "Making a dry run test."


$ git push --set-upstream origin dry-run-test

Username for 'https://github.com': seungkyua

Password for 'https://seungkyua@github.com':




## Installing docker on a Mac

## https://docs.docker.com/machine/install-machine/

$ docker-machine create --driver virtualbox default-docker

docker-machine ls

$ docker-machine env default-docker

$ eval "$(docker-machine env default-docker)"



## Continuing the contribution workflow

## build a development environment image and run it in a container.

$ make shell


## In docker container, make docker binary

root@143823c11fba:/go/src/github.com/docker/docker# hack/make.sh binary


## Copy the binaries

# cp bundles/1.12.0-dev/binary-client/docker* /usr/bin

# cp bundles/1.12.0-dev/binary-daemon/docker* /usr/bin


## Run the docker daemon in the background

# docker daemon -D&




## Go back into the dev container, modify the file, and rebuild

# vi api/client/container/attach.go

42           flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN \

43           (standard in)")


# hack/make.sh binary

# cp bundles/1.12.0-dev/binary-client/docker* /usr/bin

# cp bundles/1.12.0-dev/binary-daemon/docker* /usr/bin

# docker daemon -D&


## Check the change

# docker attach --help



## Tests (the kind of test run depends on the arguments)

## test : Run the unit, integration and docker-py tests.

## test-unit : Run just the unit tests.

## test-integration-cli : Run the test for the integration command line interface.

## test-docker-py : Run the tests for Docker API client.

$ make test



## How to test inside the development container

$ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker \

docker-dev:dry-run-test /bin/bash


## Use hack/make.sh, but dynbinary binary cross must be given as targets

# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py

or run only the unit tests

# hack/make.sh dynbinary binary cross test-unit



## Run the unit tests

$ TESTDIRS='opts' TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit



## Run the integration tests

$ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli

or test inside the development container

# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli



## How to create an issue and sync the local branch, remote repository, and docker repository

## https://docs.docker.com/opensource/workflow/find-an-issue/

## Two kinds of labels must be attached to an issue, matching your situation

exp/beginner, exp/intermediate, exp/expert

kind/bug, kind/docs, kind/enhancement, kind/feature, kind/question


## Commenting #dibs on an issue means you are claiming it.


## Check out master

$ git checkout master


## Sync the latest code from the docker repository to local

$ git fetch upstream master

$ git rebase upstream/master


## Push the latest local code to the remote repository

$ git push origin master


## Create a branch for issue number 11038

$ git checkout -b 11038-fix-rhel-link


## Just in case, rebase the branch onto the latest code from the docker repository

$ git rebase upstream/master





[ Cleaning up completely when docker does not start or errors out ]

systemctl status docker.service     


# When there is a mount error

du -h /var/lib/docker/

Delete the files under /var/lib/docker/containers/

Delete the files under /var/lib/docker/devicemapper/metadata/

Delete the files under /var/lib/docker/devicemapper/mnt/

Delete the files under /var/lib/docker/volumes/

Delete the files under /var/lib/docker/graph/

Delete the /var/lib/docker/linkgraph.db file


Delete /var/run/docker.pid

Delete /var/run/docker.sock


# Remove the device-mapper devices

lsblk

grep docker /proc/*/mounts

systemd-cgls

dmsetup ls

ls -al /dev/mapper/docker-*      # for each entry in the result, call it $dm

umount $dm

dmsetup remove $dm




[ Debug configuration ]

# Modifying /usr/lib/systemd/system/docker.service also automatically updates

/etc/systemd/system/multi-user.target.wants/docker.service and

/lib/systemd/system/docker.service (they reference the same unit file)


vi /usr/lib/systemd/system/docker.service

...

ExecStart=/bin/sh -c 'DEBUG=1 /usr/bin/docker daemon $OPTIONS \

...


sudo systemctl daemon-reload

sudo systemctl restart docker



[ Checking system services ]

systemctl list-units --type service

systemctl list-unit-files



[ Docker installation test ]

docker run --rm -ti centos:latest /bin/bash



[ Sample Dockerfile ]

FROM node:0.10

MAINTAINER Anna Doe <anna@example.com>

LABEL "rating"="Five Stars" "class"="First Class"


USER root

ENV AP /data/app

ENV SCPATH /etc/supervisor/conf.d

RUN apt-get -y update


# The daemons

RUN apt-get -y install supervisor

RUN mkdir -p /var/log/supervisor

   

# Supervisor Configuration

ADD ./supervisord/conf.d/* $SCPATH/


# Application Code

ADD *.js* $AP/

WORKDIR $AP

RUN npm install

CMD ["supervisord", "-n"]



git clone https://github.com/spkane/docker-node-hello.git

cd docker-node-hello


tree -a -I .git







[ Docker Hub Registry ]

# User login

docker login


Username: seungkyua

Password: 

Email: seungkyua@gmail.com

WARNING: login credentials saved in /root/.docker/config.json

Login Succeeded


# User logout

docker logout


# Push to the hub

docker tag example/docker-node-hello seungkyua/docker-node-hello



# restart option

docker run -ti --restart=on-failure:3 -m 200m --memory-swap=300m \

     progrium/stress --cpu 2 --io 1 --vm 2 --vm-bytes 128M --timeout 120s



# stop

docker stop -t 25 node-hello       # stop sends SIGTERM; after the 25 seconds given by -t, it sends SIGKILL




[ Deleting containers, images, and volumes ]

# delete all stopped docker

docker rm $(docker ps -a -q)


# delete untagged images

docker rmi $(docker images -q -f "dangling=true")


# delete volumes

docker volume rm $(docker volume ls -qf dangling=true)



[ Docker information ]

docker version

docker info




[ docker inspect ]

docker pull ubuntu:latest

docker run -d -t --name ubuntu ubuntu /bin/bash

docker inspect node-hello

docker inspect --format='{{.State.Running}}' node-hello

docker inspect -f '{{.State.Pid}}' node-hello

docker inspect -f '{{.NetworkSettings.IPAddress}}' node-hello

docker inspect -f '{{.Name}} {{.State.Running}}' ubuntu node-hello


# list all port bindings

docker inspect -f '{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' node-hello


# specific port mapping

docker inspect -f '{{(index (index .NetworkSettings.Ports "8080/tcp") 0).HostPort}}' node-hello


# json print

docker inspect -f '{{json .Config}}' node-hello | python -mjson.tool



[ Two ways to get inside a container ]

docker exec -it ubuntu /bin/bash


docker inspect ubuntu | grep \"Pid\":

sudo nsenter --target [Pid] --mount --uts --ipc --net --pid



[ docker logs & stats ]

docker logs node-hello

docker stats node-hello


curl -s http://192.168.75.133:2375/v1.21/containers/node-hello/stats | head -1 | python -mjson.tool


# cAdvisor

docker run \

     --volume=/:/rootfs:ro \

     --volume=/var/run:/var/run:rw \

     --volume=/sys:/sys:ro \

     --volume=/var/lib/docker/:/var/lib/docker:ro \

     --publish=8091:8080 \

     --detach=true \

     --name=cadvisor \

     google/cadvisor:latest




[ ssh dockerfile ]

vi Dockerfile


FROM ubuntu:14.04

MAINTAINER Sven Dowideit <SvenDowideit@docker.com>

ENV REFRESHED_AT 2016-04-30


RUN apt-get update && apt-get install -y openssh-server

RUN mkdir /var/run/sshd

RUN echo 'root:screencast' | chpasswd

RUN sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config


# SSH login fix. Otherwise user is kicked off after login

RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd


ENV NOTVISIBLE "in users profile"

RUN echo "export VISIBLE=now" >> /etc/profile


EXPOSE 22

CMD ["/usr/sbin/sshd", "-D"]



docker build -t example/sshd .

docker run -d -P --name sshd example/sshd

docker port sshd
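
To verify sshd works end to end (a sketch; the root password is set to screencast in the Dockerfile above, and the host port is taken from docker port):

ssh root@localhost -p $(docker port sshd 22 | cut -d: -f2)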



# View the steps that built an image

docker history 3ae93df2b9a5



[ Dockerfile instruction notes ]

CMD : the command run when the container launches; only one can take effect

          it can be overridden, e.g. docker run -d --name sshd example/sshd /usr/sbin/sshd -D


ENTRYPOINT : cannot be overridden by the trailing arguments of docker run (they become arguments to it instead)

                      ENTRYPOINT ["/usr/sbin/sshd"]

                      CMD ["-T"]

                     giving -D as the last argument of docker run overrides -T, so sshd runs in the foreground


WORKDIR : changes the working directory

                  WORKDIR /opt/webapp/db

                  docker run -it -w /var/log ubuntu pwd   makes /var/log the working directory inside the container


ENV : sets environment variables, used by the RUN instructions that follow and persisted in the container

         ENV RVM_PATH /home/rvm/

         RUN gem install unicorn

         is the same as RVM_PATH=/home/rvm/ gem install unicorn

        docker run -it -e "WEB_PORT=8080" ubuntu env shows WEB_PORT set inside the container


USER : the user the image runs as

           USER nginx

           can be overridden with -u, e.g. docker run -d -u nginx example/nginx


VOLUME : adds volumes to the container; on the host they live under /var/lib/docker/volumes/

               several volumes can be given as an array, and each path is accessible inside the container

               VOLUME ["/opt/project", "/data" ]

               equivalent to the -v option: docker run -it -v /opt/project -v /data ubuntu /bin/bash


ADD : copies files and directories (sources must live inside the build context, or be a URL).

         ADD ./app /opt/

         copies the app directory from the host build context into the container's /opt/ directory.

         a destination ending in / means the source is treated as a directory; without a trailing / it is treated as a file.

         ADD latest.tar.gz /var/www/wordpress/ 

         extracts the latest.tar.gz archive into the /var/www/wordpress/ directory.

         files or directories with the same name already at the destination are not overwritten.

         if the destination path does not exist it is created with mode 0755 and UID/GID 0. 


COPY : like ADD, but it cannot copy files from outside the build directory and has no archive-extraction feature.

           COPY conf.d/ /etc/apache2/

           only files and directories inside the build context can be copied; filesystem metadata is copied as well

           UID and GID are set to 0.


ONBUILD : adds a trigger to the image; the instruction is injected into the build of a downstream image.

                ONBUILD ADD . /app/src

                ONBUILD RUN cd /app/src && make

                when a new image is built FROM this one, the ADD and RUN execute automatically

                triggers are inherited only once (by children, not grandchildren)
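

A minimal sketch of a child image that fires those triggers; the parent tag example/onbuild-base is an assumption, not from the text:

vi Dockerfile
FROM example/onbuild-base
MAINTAINER Anna Doe <anna@example.com>

docker build -t example/onbuild-child .      # the inherited ADD and RUN execute during this build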




[ Dockerfile ]

# Download from GitHub

$ git clone https://github.com/jamtur01/dockerbook-code

$ cd dockerbook-code/code/5/website

$ docker build -t example/nginx .

$ docker run -d -p 80 --name website \

   -v $PWD/website:/var/www/html/website:ro example/nginx nginx


or



# nginx

$ vi Dockerfile

FROM ubuntu:14.04

MAINTAINER James Turnbull "james@example.com"

ENV REFRESHED_AT 2014-06-01


RUN apt-get update

RUN apt-get -y -q install nginx


RUN mkdir -p /var/www/html/website

ADD nginx/global.conf /etc/nginx/conf.d/

ADD nginx/nginx.conf /etc/nginx/


EXPOSE 80




$ vi nginx/global.conf

server {

        listen          0.0.0.0:80;

        server_name     _;


        root            /var/www/html/website;

        index           index.html index.htm;


        access_log      /var/log/nginx/default_access.log;

        error_log       /var/log/nginx/default_error.log;

}





$ vi nginx/nginx.conf

user www-data;

worker_processes 4;

pid /run/nginx.pid;

daemon off;


events {  }


http {

  sendfile on;

  tcp_nopush on;

  tcp_nodelay on;

  keepalive_timeout 65;

  types_hash_max_size 2048;

  include /etc/nginx/mime.types;

  default_type application/octet-stream;

  access_log /var/log/nginx/access.log;

  error_log /var/log/nginx/error.log;

  gzip on;

  gzip_disable "msie6";

  include /etc/nginx/conf.d/*.conf;

}



$ docker build -t example/nginx .


$ docker history -H --no-trunc=true 3e1cdbcccf11


$ mkdir website; cd website

$ wget https://raw.githubusercontent.com/jamtur01/dockerbook-\

code/master/code/5/website/website/index.html


$ cd ..

$ docker run -d -p 80 --name website \

   -v $PWD/website:/var/www/html/website:ro example/nginx nginx





Jenkins by 구교준 (@Bliexsoft)

$ cd ~/Documents/Docker

$ git clone https://github.com/jenkinsci/docker.git jenkinsci


## Add the Maven installation below

$ vi Dockerfile

# Install Maven - Start
USER root

ENV MAVEN_VERSION 3.3.9

RUN mkdir -p /usr/share/maven \
    && curl -fsSL http://apache.osuosl.org/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz \
    | tar -xzC /usr/share/maven --strip-components=1 \
    && ln -s /usr/share/maven/bin/mvn /usr/bin/mvn

ENV MAVEN_HOME /usr/share/maven

VOLUME /root/.m2
# Install Maven - End

# Setting Jenkins
USER jenkins

COPY config.xml /var/jenkins_home/config.xml
COPY hudson.tasks.Maven.xml /var/jenkins_home/hudson.tasks.Maven.xml

COPY plugins.txt /usr/share/jenkins/ref/
RUN /usr/local/bin/plugins.sh /usr/share/jenkins/ref/plugins.txt




$ vi config.xml

<?xml version='1.0' encoding='UTF-8'?>
<hudson>
  <disabledAdministrativeMonitors/>
  <version>1.651.2</version>
  <numExecutors>2</numExecutors>
  <mode>NORMAL</mode>
  <useSecurity>true</useSecurity>
  <authorizationStrategy class="hudson.security.AuthorizationStrategy$Unsecured"/>
  <securityRealm class="hudson.security.SecurityRealm$None"/>
  <disableRememberMe>false</disableRememberMe>
  <projectNamingStrategy class="jenkins.model.ProjectNamingStrategy$DefaultProjectNamingStrategy"/>
  <workspaceDir>${JENKINS_HOME}/workspace/${ITEM_FULLNAME}</workspaceDir>
  <buildsDir>${ITEM_ROOTDIR}/builds</buildsDir>
  <jdks>
    <jdk>
      <name>jdk8</name>
      <home>/usr/lib/jvm/java-8-openjdk-amd64</home>
      <properties/>
    </jdk>
  </jdks>
  <viewsTabBar class="hudson.views.DefaultViewsTabBar"/>
  <myViewsTabBar class="hudson.views.DefaultMyViewsTabBar"/>
  <clouds/>
  <quietPeriod>5</quietPeriod>
  <scmCheckoutRetryCount>0</scmCheckoutRetryCount>
  <views>
    <hudson.model.AllView>
      <owner class="hudson" reference="../../.."/>
      <name>All</name>
      <filterExecutors>false</filterExecutors>
      <filterQueue>false</filterQueue>
      <properties class="hudson.model.View$PropertyList"/>
    </hudson.model.AllView>
  </views>
  <primaryView>All</primaryView>
  <slaveAgentPort>50000</slaveAgentPort>
  <label></label>
  <nodeProperties/>
  <globalNodeProperties/>
</hudson>



$ vi hudson.tasks.Maven.xml

<?xml version='1.0' encoding='UTF-8'?>
<hudson.tasks.Maven_-DescriptorImpl>
    <installations>
        <hudson.tasks.Maven_-MavenInstallation>
            <name>maven3.3.9</name>
            <home>/usr/share/maven</home>
            <properties/>
        </hudson.tasks.Maven_-MavenInstallation>
    </installations>
</hudson.tasks.Maven_-DescriptorImpl>



$ vi plugins.txt

maven-plugin:2.13

credentials:2.0.7

plain-credentials:1.1

token-macro:1.12.1

cloudfoundry:1.5

klocwork:1.18

ssh-credentials:1.11

matrix-project:1.6

mailer:1.16

scm-api:1.0

promoted-builds:2.25

parameterized-trigger:2.4

git-client:1.19.6

git:2.4.4

github-api:1.75

github:1.19.1










[ Docker in docker ]

https://github.com/jpetazzo/dind




[ Jenkins ]

$ cd dockerbook-code/code/5/jenkins

$ vi Dockerfile

FROM ubuntu:14.04

MAINTAINER james@example.com

ENV REFRESHED_AT 2014-06-01


RUN apt-get update -qq && apt-get install -qqy curl apt-transport-https

RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 \

--recv-keys 58118E89F3A912897C070ADBF76221572C52609D

RUN echo deb https://apt.dockerproject.org/repo ubuntu-trusty main > \

/etc/apt/sources.list.d/docker.list

RUN apt-get update -qq && \

apt-get install -qqy iptables ca-certificates openjdk-7-jdk git-core docker-engine


ENV JENKINS_HOME /opt/jenkins/data

ENV JENKINS_MIRROR http://mirrors.jenkins-ci.org


RUN mkdir -p $JENKINS_HOME/plugins

RUN curl -sf -o /opt/jenkins/jenkins.war -L \

$JENKINS_MIRROR/war-stable/latest/jenkins.war


RUN for plugin in chucknorris greenballs scm-api git-client git ws-cleanup ;\

    do curl -sf -o $JENKINS_HOME/plugins/${plugin}.hpi \

       -L $JENKINS_MIRROR/plugins/${plugin}/latest/${plugin}.hpi ; done


ADD ./dockerjenkins.sh /usr/local/bin/dockerjenkins.sh

RUN chmod +x /usr/local/bin/dockerjenkins.sh


VOLUME /var/lib/docker


EXPOSE 8080


ENTRYPOINT [ "/usr/local/bin/dockerjenkins.sh" ]




$ vi dockerjenkins.sh

#!/bin/bash


# First, make sure that cgroups are mounted correctly.

CGROUP=/sys/fs/cgroup


[ -d $CGROUP ] ||

  mkdir $CGROUP


mountpoint -q $CGROUP ||

  mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {

    echo "Could not make a tmpfs mount. Did you use -privileged?"

    exit 1

  }


# Mount the cgroup hierarchies exactly as they are in the parent system.

for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)

do

  [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS

  mountpoint -q $CGROUP/$SUBSYS ||

    mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS

done


# Now, close extraneous file descriptors.

pushd /proc/self/fd

for FD in *

do

  case "$FD" in

  # Keep stdin/stdout/stderr

  [012])

    ;;

  # Nuke everything else

  *)

    eval exec "$FD>&-"

    ;;

  esac

done

popd


docker daemon &

exec java -jar /opt/jenkins/jenkins.war







$ docker build -t example/dockerjenkins .

$ docker run -p 8080:8080 --name jenkins --privileged -d example/dockerjenkins




## Connecting to the docker daemon on another host (certs are optional here)

## centos

export DOCKER_HOST=tcp://192.168.75.133:2375



## boot2docker

$ export DOCKER_HOST=tcp://192.168.59.103:2376

export DOCKER_TLS_VERIFY=1 

export DOCKER_CERT_PATH=/Users/ahnsk/.boot2docker/certs/boot2docker-vm
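
With DOCKER_HOST (and, for TLS, the cert variables) exported, any local docker command now talks to the remote daemon; a quick check:

docker version
docker info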












[ Server IP information ]

eth0 : NAT type         (vmnet2)  192.168.75.138        Public Network

eth1 : Host-only type (vmnet3)  192.168.230.138      Private Network

[ For multi-node setups: second (additional) Compute Node ]
eth0 : NAT type         (vmnet2)  192.168.75.139       Public Network
eth1 : Host-only type (vmnet3)  192.168.230.139      Private Network

[ User ]
Create and work as the stack user

[ visudo settings ]
stack   ALL=(ALL:ALL) NOPASSWD:ALL

[ vi /etc/network/interfaces ]
auto lo
iface lo inet loopback

auto ens33
iface ens33 inet static
        address 192.168.75.138
        netmask 255.255.255.0
        gateway 192.168.75.2
        dns-nameservers 8.8.8.8 8.8.4.4

auto ens34
iface ens34 inet static
        address 192.168.230.138
        netmask 255.255.255.0


[ Hostname and hosts file setup ]
mkdir -p ~/Documents/scripts
cd ~/Documents/scripts

vi servers.txt
192.168.230.138 devstack01
192.168.230.139 devstack02

vi 01-hosts-setup.sh
#!/bin/bash

SERVERLIST=$HOME/Documents/scripts/servers.txt
MASTER_IP="192.168.230.138"
MASTER_HOSTNAME="devstack01"
SSH_USER="stack"

function set_sshkey() {
    local server=$1
    if [[ $server == "$MASTER_IP" ]]; then
        if [[ ! -f "${HOME}/.ssh/id_rsa" ]]; then
            yes "" | ssh-keygen -t rsa -N ""
        else
            echo "skip to create ssh-keygen"
        fi
    fi
    cat ~/.ssh/id_rsa.pub | ssh $SSH_USER@$server -oStrictHostKeyChecking=no \
        "if [ ! -f ~/.ssh/authorized_keys ] || ! grep -q ${MASTER_HOSTNAME} ~/.ssh/authorized_keys; then \
             umask 077; test -d .ssh || mkdir -p .ssh; cat >> ~/.ssh/authorized_keys; \
         fi"
    echo "$server ssh-key ..... done"
}

function change_hostname() {
    local server=$1
    local hostname=$2
    echo ${hostname} | ssh $SSH_USER@$server \
    "if ! grep -q ${hostname} /etc/hostname; then \
         sudo su -c 'cat > /etc/hostname'; \
         sudo hostname -F /etc/hostname;
     fi"
    echo "$server $hostname ..... done"
}

function change_hostfile() {
    local server=$1
    cat servers.txt | ssh $SSH_USER@$server \
    "if ! grep -q ${MASTER_HOSTNAME} /etc/hosts; then \
         sudo su -c 'cat >> /etc/hosts';
     fi"
    echo "$server hostfile .... done"
}

echo "setting sshkey ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        set_sshkey $server
    fi
done < $SERVERLIST

echo "changing hostname ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        hostname=$(echo $line | awk '{print $2}')
        change_hostname $server $hostname
    fi
done < $SERVERLIST

echo "changing hosts file ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        change_hostfile $server
    fi
done < $SERVERLIST
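
Run it from the scripts directory (a sketch; it assumes the stack user can sudo without a password on every server listed in servers.txt):

cd ~/Documents/scripts
chmod +x 01-hosts-setup.sh
./01-hosts-setup.sh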



[ NTP setup ]
vi 02-ntp-setup.sh
#!/bin/bash

SERVERLIST=$HOME/Documents/scripts/servers.txt
MASTER_IP="192.168.230.138"
SSH_USER="stack"

function ntp_master_setup() {
    local server=$1
    echo $server | ssh ${SSH_USER}@$server \
    "sudo apt-get update; \
     sudo apt-get install -y bridge-utils libvirt-bin ntp ntpdate; \
     if ! grep -q 'server 127.127.1.0' /etc/ntp.conf; then \
         sudo sed -i 's/server 0.ubuntu.pool.ntp.org/#server 0.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 1.ubuntu.pool.ntp.org/#server 1.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 2.ubuntu.pool.ntp.org/#server 2.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 3.ubuntu.pool.ntp.org/server time.bora.net/g' /etc/ntp.conf; \
         sudo sed -i 's/server ntp.ubuntu.com/server 127.127.1.0/g' /etc/ntp.conf; \
         sudo sed -i 's/restrict 127.0.0.1/restrict 192.168.0.0 mask 255.255.0.0 nomodify notrap/g' /etc/ntp.conf; \
         sudo service ntp restart; \
     fi; \
     sudo ntpdate -u time.bora.net; \
     sudo virsh net-destroy default; \
     sudo virsh net-undefine default"
}

function ntp_slave_setup() {
    local server=$1
    echo $server | ssh ${SSH_USER}@$server \
    "sudo apt-get update; \
     sudo apt-get install -y bridge-utils libvirt-bin ntp ntpdate; \
     if ! grep -q ${MASTER_IP} /etc/ntp.conf; then \
         sudo sed -i 's/server 0.ubuntu.pool.ntp.org/#server 0.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 1.ubuntu.pool.ntp.org/#server 1.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 2.ubuntu.pool.ntp.org/#server 2.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 3.ubuntu.pool.ntp.org/#server 3.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server ntp.ubuntu.com/server $MASTER_IP/g' /etc/ntp.conf; \
         sudo service ntp restart; \
     fi; \
     sudo ntpdate -u $MASTER_IP; \
     sudo virsh net-destroy default; \
     sudo virsh net-undefine default"
}

echo "ntp master setting ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        if [[ $server == "$MASTER_IP" ]]; then
            ntp_master_setup $server
        fi
    fi
done < $SERVERLIST

echo "ntp slave setting ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        if [[ $server != "$MASTER_IP" ]]; then
            ntp_slave_setup $server
        fi
    fi
done < $SERVERLIST



[ local.conf file ]
mkdir -p ~/Documents/github
cd ~/Documents/github
git clone https://github.com/openstack-dev/devstack.git
cd devstack

vi local.conf
[[local|localrc]]
HOST_IP=192.168.75.138
SERVICE_HOST=192.168.75.138
MYSQL_HOST=192.168.75.138
RABBIT_HOST=192.168.75.138
GLANCE_HOSTPORT=192.168.75.138:9292
ADMIN_PASSWORD=secret
DATABASE_PASSWORD=secret
RABBIT_PASSWORD=secret
SERVICE_PASSWORD=secret

# Do not use Nova-Network
disable_service n-net

# Neutron service
enable_service neutron
enable_service q-svc
enable_service q-agt
enable_service q-dhcp
enable_service q-l3
enable_service q-meta

# Neutron options
Q_USE_SECGROUP=True
FLOATING_RANGE="192.168.75.0/24"
FIXED_RANGE="10.0.0.0/24"
Q_FLOATING_ALLOCATION_POOL=start=192.168.75.193,end=192.168.75.254
PUBLIC_NETWORK_GATEWAY="192.168.75.2"
Q_L3_ENABLED=True
PUBLIC_INTERFACE=ens33

# Open vSwitch provider networking configuration
Q_USE_PROVIDERNET_FOR_PUBLIC=True
OVS_PHYSICAL_BRIDGE=br-ex
PUBLIC_BRIDGE=br-ex
OVS_BRIDGE_MAPPINGS=public:br-ex

# Nova service
enable_service n-api
enable_service n-cpu
enable_service n-cond
enable_service n-sch
enable_service n-novnc
enable_service n-cauth

# Cinder service
enable_service cinder
enable_service c-api
enable_service c-vol
enable_service c-sch
enable_service c-bak

# Tempest service
enable_service tempest

# Swift service
enable_service s-proxy
enable_service s-object
enable_service s-container
enable_service s-account

# Heat service
enable_service heat
enable_service h-api
enable_service h-api-cfn
enable_service h-api-cw
enable_service h-eng

# Enable plugin neutron-lbaas, octavia
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas master
enable_plugin octavia https://git.openstack.org/openstack/octavia

# Enable plugin Magnum
#enable_plugin magnum https://github.com/openstack/magnum master
#enable_plugin magnum-ui https://github.com/openstack/magnum-ui master

# Enable plugin Monasca (needs adjusting for systemd/systemctl on Ubuntu 16.04)
enable_plugin monasca-api https://github.com/openstack/monasca-api master
enable_plugin monasca-log-api https://github.com/openstack/monasca-log-api master

MONASCA_API_IMPLEMENTATION_LANG=${MONASCA_API_IMPLEMENTATION_LANG:-python}
MONASCA_PERSISTER_IMPLEMENTATION_LANG=${MONASCA_PERSISTER_IMPLEMENTATION_LANG:-python}
MONASCA_METRICS_DB=${MONASCA_METRICS_DB:-influxdb}



# Cinder configuration
VOLUME_GROUP="cinder-volumes"
VOLUME_NAME_PREFIX="volume-"

# Images
# Use this image when creating test instances
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
# Use this image when working with Orchestration (Heat)
IMAGE_URLS+=",https://download.fedoraproject.org/pub/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-23-20151030.x86_64.qcow2"

KEYSTONE_CATALOG_BACKEND=sql
API_RATE_LIMIT=False
SWIFT_HASH=testing
SWIFT_REPLICAS=1
VOLUME_BACKING_FILE_SIZE=70000M

LOGFILE=$DEST/logs/stack.sh.log

# A clean install every time
RECLONE=yes



[ Adding a Compute Node ]
vi local.conf
[[local|localrc]]
HOST_IP=192.168.75.139
SERVICE_HOST=192.168.75.138
MYSQL_HOST=192.168.75.138
RABBIT_HOST=192.168.75.138
GLANCE_HOSTPORT=192.168.75.138:9292
ADMIN_PASSWORD=secret
MYSQL_PASSWORD=secret
RABBIT_PASSWORD=secret
SERVICE_PASSWORD=secret

# Neutron options
PUBLIC_INTERFACE=ens33
ENABLED_SERVICES=n-cpu,n-novnc,rabbit,q-agt

LOGFILE=$DEST/logs/stack.sh.log



[ Run the installation ]
./stack.sh


[ Storage mounts ]
sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 /opt/stack/data/swift/drives/images/swift.img /opt/stack/data/swift/drives/sdb1

sudo losetup /dev/loop1 /opt/stack/data/cinder-volumes-default-backing-file

sudo losetup /dev/loop2 /opt/stack/data/cinder-volumes-lvmdriver-1-backing-file


[ CPU, RAM, disk overcommit settings ]
vi /etc/nova/nova.conf

scheduler_default_filters = ..., CoreFilter          # add CoreFilter
cpu_allocation_ratio=50.0
ram_allocation_ratio=16.0
disk_allocation_ratio=50.0


[ Run the services ]
screen -c stack-screenrc


[ Create a VM ]
. openrc admin demo


openstack project list
openstack security group list

# add rules to the default security group
openstack security group rule create --proto icmp --src-ip 0.0.0.0/0 --dst-port -1 --ingress 2d95031b-132b-4d46-aacd-f392cdd8c4fb

openstack security group rule create --proto tcp --src-ip 0.0.0.0/0 --dst-port 1:65535 --ingress 2d95031b-132b-4d46-aacd-f392cdd8c4fb

# register the keypair (uploads the existing public key)
openstack keypair create --public-key ~/.ssh/id_rsa.pub magnum-key


openstack flavor list
openstack image list
openstack network list

# nova boot
openstack server create --image 7e688989-e59b-4b20-a562-1de946ee91e9 --flavor m1.tiny  --nic net-id=f57b8f2c-cd67-4d49-b38c-393dbb773c9b  --key-name magnum-key --security-group default test-01


# create a floating IP and attach it to the server
openstack ip floating create public
openstack ip floating list
openstack ip floating add 192.168.75.194 test-01


# View the router network namespaces
sudo ip netns
qdhcp-f57b8f2c-cd67-4d49-b38c-393dbb773c9b
qrouter-b46e14d5-4ef5-4bfa-8dc3-463a982688ab


[ How to tcpdump the traffic path ]
# Compute Node
[vm] -> tap:[qbrb97b5aa3-f8 Linux Bridge]:qvbb97b5aa3-f8 -> qvob97b5aa3-f8:[OVS br-int Bridge]:patch-tun -> patch-int:[OVS br-tun Bridge]:br-tun ->

# Network Node
br-tun:OVS br-tun Bridge:patch-int -> patch-tun:OVS br-int Bridge:qr-c163af1e-53 -> 
qr-c163af1e-53:qrouter(Namespace) -> qg-d8187261-68:qg(Namespace) -> 
qg-d8187261-68:OVS br-int Bridge:int-br-ex -> phy-br-ex:OVS br-ex Bridge -> NIC 

sudo tcpdump -n -e -i qbrb97b5aa3-f8 | grep 10.0.0.3
sudo tcpdump -n -e -i qvbb97b5aa3-f8 | grep 10.0.0.3
sudo tcpdump -n -e -i qvob97b5aa3-f8 | grep 10.0.0.3
sudo ip netns exec qrouter-b46e14d5-4ef5-4bfa-8dc3-463a982688ab tcpdump -n -e -i qr-c163af1e-53 | grep 10.0.0.3



[ Magnum k8s 생성 ]
cd ~/Documents/github/devstack/files
wget https://fedorapeople.org/groups/magnum/fedora-21-atomic-5.qcow2
glance image-create --name fedora-21-atomic-5 \
                    --visibility public \
                    --disk-format qcow2 \
                    --os-distro fedora-atomic \
                    --container-format bare < fedora-21-atomic-5.qcow2


magnum service-list

magnum baymodel-create --name k8sbaymodel \
                       --image-id fedora-21-atomic-5 \
                       --keypair-id magnum-key \
                       --external-network-id public \
                       --dns-nameserver 8.8.8.8 \
                       --flavor-id m1.small \
                       --docker-volume-size 5 \
                       --network-driver flannel \
                       --coe kubernetes

magnum baymodel-list
magnum bay-create --name k8sbay --baymodel k8sbaymodel --node-count 1

neutron lb-pool-list
neutron lb-vip-list
neutron lb-member-list

magnum bay-list


[ What to delete manually when Magnum cluster creation fails ]
Delete the floating IPs - api-pool-vip, kube-master, kube-node
openstack ip floating list
sudo ip netns exec qrouter-2f49aeb4-421c-4994-923a-5aafe453fa3d ip a

Delete api.pool.vip
neutron lb-vip-list
neutron lb-pool-list
neutron lb-member-list

# delete the private network
openstack network list

# delete the router and its external gateway
openstack router list
openstack port list
openstack router remove port        (removes the gateway)
openstack router remove subnet    (removes the subnet)












ca-key.pem -> ca.pem

server-key.pem -> server.csr -> server.csr + (ca-key.pem + ca.pem) -> server.cert

client-key.pem -> client.csr -> client.csr + (ca-key.pem + ca.pem) -> client.cert



[ Creating the CA ]


1. ca-key.pem => ca.pem    (ca.crt: the client CA file)

$ sudo mkdir -p /etc/docker

$ cd /etc/docker

$ echo 01 | sudo tee ca.srl


$ sudo openssl genrsa -des3 -out ca-key.pem

Enter pass phrase for ca-key.pem:

Verifying - Enter pass phrase for ca-key.pem:


$ sudo openssl req -new -days 365 -key ca-key.pem -out ca.pem

Enter pass phrase for ca-key.pem:

...

Common Name (e.g. server FQDN or Your name) []: *         (ex : www.ahnseungkyu.com)



[ Creating the server cert ]


1. server-key.pem => server.csr    (the Common Name, i.e. the server FQDN, matters here)

$ sudo openssl genrsa -des3 -out server-key.pem

Enter pass phrase for server-key.pem:

Verifying - Enter pass phrase for server-key.pem:


$ sudo openssl req -new -key server-key.pem -out server.csr

Enter pass phrase for server-key.pem:

...

Common Name (e.g. server FQDN or Your name) []: *         (ex : www.ahnseungkyu.com)


2. ca-key.pem + ca.pem + server.csr => server-cert.pem (server.cert: the server cert file)

$ sudo openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem -out server-cert.pem

Enter pass phrase for ca-key.pem:


3. Remove the passphrase from server-key.pem (server.key: the server private key file)

$ sudo openssl rsa -in server-key.pem -out server-key.pem

Enter pass phrase for server-key.pem:

writing RSA key


4. Fix the permissions

$ sudo chmod 600 /etc/docker/server-key.pem /etc/docker/server-cert.pem /etc/docker/ca-key.pem /etc/docker/ca.pem




[ Docker daemon configuration ]


Ubuntu, Debian : /etc/default/docker

RHEL, Fedora    : /etc/sysconfig/docker

systemd version : /usr/lib/systemd/system/docker.service
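
On the non-systemd variants the same TLS flags go into DOCKER_OPTS (a sketch for /etc/default/docker; 2376 is the conventional TLS port and the cert paths match the files generated above):

DOCKER_OPTS="-H tcp://0.0.0.0:2376 --tlsverify --tlscacert=/etc/docker/ca.pem --tlscert=/etc/docker/server-cert.pem --tlskey=/etc/docker/server-key.pem"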




[ Running the Docker server under systemd ]


ExecStart=/usr/bin/docker -d -H tcp://0.0.0.0:2376 --tlsverify --tlscacert=/etc/docker/ca.pem --tlscert=/etc/docker/server-cert.pem --tlskey=/etc/docker/server-key.pem


[ Reload and restart the Docker daemon ]

$ sudo systemctl --system daemon-reload

$ sudo systemctl restart docker




[ Creating the client cert ]


1. client-key.pem => client.csr

$ sudo openssl genrsa -des3 -out client-key.pem

Enter pass phrase for client-key.pem:

Verifying - Enter pass phrase for client-key.pem:


$ sudo openssl req -new -key client-key.pem -out client.csr

Enter pass phrase for client-key.pem:

...

Common Name (e.g. server FQDN or Your name) []:



2. Add the client-authentication extension

$ echo extendedKeyUsage = clientAuth > extfile.cnf



3. ca-key.pem + ca.pem + client.csr => client-cert.pem

$ sudo openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem -out client-cert.pem -extfile extfile.cnf

Enter pass phrase for ca-key.pem:



4. Remove the passphrase from client-key.pem

$ sudo openssl rsa -in client-key.pem -out client-key.pem

Enter pass phrase for client-key.pem:

writing RSA key




[ Configuring TLS on the Docker client ]


$ mkdir -p ~/.docker

$ cp ca.pem ~/.docker/ca.pem

$ cp client-key.pem ~/.docker/key.pem

$ cp client-cert.pem ~/.docker/cert.pem

$ chmod 600 ~/.docker/key.pem ~/.docker/cert.pem


# Test the docker connection

$ sudo docker -H=docker.example.com:2376 --tlsverify info
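
Instead of passing -H and --tlsverify every time, the client can also be driven by environment variables, as in the boot2docker example earlier (docker.example.com stands in for your daemon's hostname):

export DOCKER_HOST=tcp://docker.example.com:2376
export DOCKER_TLS_VERIFY=1
export DOCKER_CERT_PATH=~/.docker
docker info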



# server

# sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem \

--tlskey=server-key.pem -H=0.0.0.0:4243


# client -- note that this uses --tls instead of --tlsverify, which I had trouble with 

# docker --tls --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \

-H=dns-name-of-docker-host:4243








