반응형

## ansible 스크립트 만들 때 참고 명령어

## 변수 보기

$ ansible -i hosts -m debug -a "var=hostvars['kube-master01']" localhost
$ ansible -i hosts -m debug -a "var=groups['kube-masters']" localhost

## 하나의 task 만 수행 (restart flannel)
$ ansible-playbook -i hosts --start-at-task='restart flannel' 03-flannel.yml

## task 조회
$ ansible-playbook -i hosts 03-flannel.yml --list-tasks


## 모든 소스는 github 에 ...



[ 모든 node 에서 수행 ]

$ sudo su -
# vi /etc/ssh/sshd_config
28 PermitRootLogin yes
52 PasswordAuthentication yes

# systemctl restart sshd
# passwd
# passwd stack


$ sudo apt-get install python2.7 python-minimal

$ sudo apt-get update
$ sudo apt-get install -y ansible

## ntp ntpdate 패키지를 설치해서 ntp 를 세팅해야 함




[ git init 설정 후 github 에 올리기 ]
$ cd ~/kubernetes-ansible
$ git init
$ git remote add origin https://github.com/seungkyua/kubernetes-ansible.git
$ git pull origin master
$ git config user.name "Seungkyu Ahn"
$ git config user.email "seungkyua@gmail.com"
$ git add -A
$ git commit -a -m "Initial commit"
$ git push --set-upstream origin master




[ Prerequisite ]
$ vi README.md
# Prerequisite #

 - This ansible-playbook is tested in Ubuntu 16.04 LTS
 - Need one Kubernetes deploy node (a.k.a kube-deploy)
 - Login from kube-deploy to all Kubernetes nodes by `root` user without password using hostname
 - kube-deploy should have swap memory over 2G byte
 - Every work should be executed by `stack` user at kube-deploy
 - Every nodes should be installed ansible and python packages

```
$ sudo apt-get update
$ sudo apt-get install -y ansible python2.7 python-minimal
```

 - Group names and node names can not be changed in `hosts` file

```
[kube-deploy]
kube-deploy

[kube-masters]
kube-master01
kube-master02

[kube-nodes]
kube-node01
kube-node02

[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14

[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16
```

 - You have to change your own password at the `add_user_password` field in the `group_vars/all` file


## Tips ##

An encrypted password can be generated with the following command

```
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512
[ input password and enter]
```


## Execute order ##

```
$ sudo ansible-playbook -i hosts 00-create-user.yml
$ sudo ansible-playbook -i hosts 00-install-package.yml
$ sudo ansible-playbook -i hosts 01-install-docker.yml
$ sudo chown -R stack.stack ~/.ansible
$ ansible-playbook -i hosts 02-etcd.yml
$ ansible-playbook -i hosts 03-flannel.yml
$ ansible-playbook -i hosts 04-kubernetes.yml
```


## restart service ##

 - restart docker

```
$ sudo ansible-playbook -i hosts --tags="restart docker" 01-install-docker.yml
```

 - restart etcd

```
$ ansible-playbook -i hosts --tags="restart etcd" 02-etcd.yml
```

 - restart flannel

```
$ ansible-playbook -i hosts --tags="restart flannel" 03-flannel.yml
```

 - restart kubernetes

```
$ ansible-playbook -i hosts --tags="restart kube-apiserver,restart kube-controller-manager,restart kube-scheduler,restart kube-proxy,restart kubelet" 04-kubernetes.yml
```



[ kube-deploy node 에서 수행 ]
## kube-deploy node 접속
$ ssh -i ~/magnum-key.pem stack@192.168.30.138
$ ssh-keygen -t rsa
$ sudo su -
# ssh-keygen -t rsa
# ssh-copy-id kube-deploy
# ssh-copy-id kube-master01
# ssh-copy-id kube-master02
# ssh-copy-id kube-node01
# ssh-copy-id kube-node02


$ mkdir -p ~/kubernetes-ansible && cd ~/kubernetes-ansible
$ vi hosts
[kube-deploy]
kube-deploy

[kube-masters]
kube-master01
kube-master02

[kube-nodes]
kube-node01
kube-node02

[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14

[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16


$ vi ansible.cfg
[defaults]
host_key_checking = False



## password encrypt 값을 알아냄
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512



## 환경 변수 세팅
$ mkdir -p group_vars && vi group_vars/all
ansible_dir: "kubernetes-ansible"
gopath_dir: "go_workspace"
add_user: "stack"
add_user_password: "generated password here!"
ubuntu_release: "xenial"
kube_deploy_uname_r: "4.4.0-22-generic"
uname_r: "4.4.0-21-generic"
etcd_data_dir: "/ext/data/etcd"
flannel_version: "v0.6.1"
flannel_net: "172.16.0.0/16"
mtu_size: "1500"
kube_version: "release-1.4"
kube_api_ip: "192.168.30.13"
service_cluster_ip_range: "192.168.30.192/26"
service_node_port_range: "30000-32767"
cluster_dns: "192.168.30.200"
cluster_domain: "cluster.local" 
 



$ mkdir -p files && vi files/hosts
127.0.0.1       localhost

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts


10.0.0.18           kube-depoly    # NOTE(review): likely a typo for kube-deploy — confirm the intended hostname/IP
192.168.30.138  kube-deploy
192.168.30.13    kube-master01
192.168.30.14    kube-master02
192.168.30.15    kube-node01
192.168.30.16    kube-node02




## user 생성 (stack), key 자동 복사, sudo user 등록, 환경변수 세팅, host 파일 복사
$ 00-create-user.yml
---
# Create the deployment user on every host, install its SSH public key,
# grant passwordless sudo, set Go-related environment variables in the
# user's .bashrc, and distribute the common /etc/hosts file.
- name: create the user
  hosts: all
  remote_user: root

  tasks:
    - include_vars: group_vars/all

    # Creates the user with an encrypted password (see mkpasswd tip above)
    # and generates an SSH keypair in the user's home directory.
    - name: Add the {{ add_user }} user
      user: name={{ add_user }} groups=sudo createhome=yes shell=/bin/bash
            password={{ add_user_password }} append=yes
            generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa

    # NOTE(review): lookup('file', ...) reads on the control node, so this
    # assumes the playbook runs on kube-deploy where the key was generated.
    - name: Set up authorized_keys for the {{ add_user }}
      authorized_key: user={{ add_user }} key="{{ lookup('file', '/home/{{ add_user }}/.ssh/id_rsa.pub') }}"

    # Passwordless sudo; 'validate' prevents writing a broken sudoers file.
    - name: sudo 
      lineinfile:
        "dest=/etc/sudoers state=present regexp='^{{ add_user }} ALL='
         line='{{ add_user }} ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'"

    # GOPATH includes the kubernetes Godeps workspace so 'go build' can
    # resolve vendored dependencies.
    - name: export GOPATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export GOPATH' line='export GOPATH=$HOME/{{ gopath_dir }}:$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes/Godeps/_workspace'"

    - name: export PATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export PATH'
         line='export PATH=$HOME/{{ gopath_dir }}/bin:$PATH'"

    - name: export KUBE_ROOT
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export KUBE_ROOT'
         line='export KUBE_ROOT=$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes'"

    # Distributes files/hosts so every node resolves the cluster hostnames.
    - name: Copy hosts file
      copy:
        src: "files/hosts"
        dest: "/etc"
        owner: root


sudo ansible-playbook -i hosts 00-create-user.yml




## apt-get package 설치
$ vi 00-install-package.yml
---
# Install the base apt packages (network tools, Go toolchain, git)
# required by the later docker / flannel / kubernetes playbooks.
- name: install package
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: root

  tasks:
    - include_vars: group_vars/all

    - name: Install apt packages
      apt: name={{ item }}  update_cache=yes
      with_items:
        - bridge-utils
        - linux-libc-dev
        - golang
        - gcc
        - curl
        - git


sudo ansible-playbook -i hosts 00-install-package.yml



## docker install
$ vi 01-install-docker.yml
---
# This playbook setup docker package

- hosts: kube-deploy kube-masters kube-nodes
  remote_user: root

  roles:
    - docker




$ mkdir -p roles/docker/files && vi roles/docker/files/docker.xenial.list
deb https://apt.dockerproject.org/repo ubuntu-xenial main


$ vi roles/docker/files/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/etc/default/docker
ExecStart=/usr/bin/docker daemon $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target



$ vi roles/docker/files/docker
DOCKER_OPTS=" -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock"



$ mkdir -p roles/docker/tasks && vi roles/docker/tasks/main.yml
---
# Install and configure the Docker engine from the official apt repository,
# then point it at the TCP + unix socket configuration shipped in files/.

- name: install apt-transport-https ca-certificates
  apt: name={{ item }}
  with_items:
    - apt-transport-https
    - ca-certificates

# BUGFIX: the original had a literal '\' line-continuation inside the
# key=value string, which corrupted the parsed keyserver/id arguments.
- name: add GPG key
  apt_key: keyserver=hkp://p80.pool.sks-keyservers.net:80
           id=58118E89F3A912897C070ADBF76221572C52609D

- name: add docker.list
  copy:
    src: "docker.{{ ubuntu_release }}.list"
    dest: "/etc/apt/sources.list.d"
    owner: root

- name: apt-get update
  apt: update_cache=yes

# BUGFIX: group_vars/all defines 'kube_deploy_uname_r'; the original task
# referenced a misspelled 'kube_depoloy_uname_r', causing an
# undefined-variable failure on the kube-deploy host.
- name: install linux-image-extra kube-deploy
  apt: name=linux-image-extra-{{ kube_deploy_uname_r }}
  when: "'kube-deploy' in group_names"

- name: install linux-image-extra kube-masters kube-nodes
  apt: name=linux-image-extra-{{ uname_r }}
  when: "('kube-masters' in group_names) or ('kube-nodes' in group_names)"

#- name: restart servers
#  shell: sleep 2 && shutdown -r now
#  async: 0
#  poll: 0
#  ignore_errors: true

#- name: Waiting for server to come back
#  local_action: wait_for host={{ inventory_hostname }} \
#                state=started port=22 delay=10 timeout=300

#- name: Update apt
#  apt: update_cache=yes

- name: install docker
  apt: name=docker-engine

- name: add docker group
  user: name={{ add_user }} group=docker

- name: copy docker config
  copy:
    src: "docker"
    dest: "/etc/default"
    mode: 0755
    owner: root

- name: copy docker.service
  copy:
    src: "docker.service"
    dest: "/lib/systemd/system"
    mode: 0644
    owner: root

# A fresh unit file requires a daemon-reload before restart picks it up.
- name: reload systemd
  shell: systemctl daemon-reload

- name: restart docker
  service: name=docker state=restarted enabled=yes
  tags:
    - restart docker

- name: export DOCKER_HOST
  lineinfile:
    "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export DOCKER_HOST'
     line='export DOCKER_HOST=127.0.0.1:4243'"


$ sudo ansible-playbook -i hosts 01-install-docker.yml



[ etcd 설치 ]

sudo chown -R stack.stack ~/.ansible
$ vi 02-etcd.yml
---
# This playbook installs etcd cluster.

- name: Setup etcd
  hosts: kube-masters
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - etcd


## --listen-peer-urls 과 --listen-client-urls 은 0.0.0.0 주소로 줄 수 있음
$ mkdir -p roles/etcd/templates && vi roles/etcd/templates/etcd.conf.j2
DAEMON_ARGS="--name {{ inventory_hostname }} \
--data-dir={{ etcd_data_dir }} \
--initial-advertise-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379,http://127.0.0.1:2379,\
http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001,http://127.0.0.1:4001 \
--advertise-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379 \
--initial-cluster-token etcd-cluster-1 \
{% for host in groups['kube-masters'] %}
{% if host == groups['kube-masters']|first %}
--initial-cluster {{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% else %},{{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% endif %}{% endfor %} \

--initial-cluster-state new"


$ mkdir -p roles/etcd/tasks && vi roles/etcd/tasks/main.yml
---
# Install etcd, render its configuration from the template, and make sure
# the service is enabled and running with the new settings.

- name: install etcd
  apt:
    name: etcd
    update_cache: yes

- name: copy etcd config
  template:
    src: etcd.conf.j2
    dest: /etc/default/etcd

- name: enable etcd systemd
  service:
    name: etcd
    enabled: yes

# Tagged so it can be run standalone:
#   ansible-playbook -i hosts --tags="restart etcd" 02-etcd.yml
- name: restart etcd
  service:
    name: etcd
    state: restarted
  tags:
    - restart etcd






$ ansible-playbook -i hosts 02-etcd.yml



## 특정 task 만 돌릴 경우
$ ansible-playbook -i hosts --start-at-task="restart etcd" 02-etcd.yml

## etcd 멤버 조회 (2개가 나와야 함)
$ etcdctl member list

## etcd 테스트
$ etcdctl --endpoint http://192.168.30.13:2379 set /test "hello"
$ etcdctl --endpoint http://192.168.30.14:2379 get /test
$ etcdctl --endpoint http://192.168.30.13:2379 rm /test

$ etcdctl --no-sync --endpoint http://kube-master01:2379 --debug ls / -recursive




[ flannel 설치 ]

## flannel 데이터를 etcd 입력 및 설치
$ vi 03-flannel.yml
---
- name: Setup flannel
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - flannel


## flannel 을 위한 네트워크 값을  etcd 에 등록하는 스트립트
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/config-flannel.sh.j2
#!/bin/bash

exec curl -s -L http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001/v2/keys/coreos.com/network/config \
-XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'



## 좀 무식한 방식임
{% if inventory_hostname == 'kube-master01' %}
exec curl -s -L http://{{ groups['kube-masters'][0] }}:4001/v2/keys/coreos.com/network/config \
-XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
{% endif %}


## flannel 을 다운 받아서 build 하는 스크립트
$ vi roles/flannel/templates/download-flannel.sh.j2
#!/bin/bash
# Rendered by Ansible on kube-deploy: clone the requested flannel release,
# build the flanneld binary, and stage it into the ansible role's files/
# directory for distribution to the nodes.

FLANNEL_DIR="${HOME}/github/flannel"
FLANNEL_VERSION="{{ flannel_version }}"
ANSIBLE_HOME="${HOME}/{{ ansible_dir }}"

# cd that aborts the script instead of silently continuing in the wrong
# directory when the target does not exist.
function chdir() {
    cd "$1" || exit 1
}

if [ ! -d "${FLANNEL_DIR}" ]; then
    mkdir -p "${HOME}/github"
    chdir "${HOME}/github"
    git clone https://github.com/coreos/flannel.git
    chdir "${FLANNEL_DIR}"
    git checkout -b "${FLANNEL_VERSION}" "tags/${FLANNEL_VERSION}"
fi

chdir "${FLANNEL_DIR}"

# Recreate the upstream build script if the checkout does not provide one.
if [ ! -f build ]; then
cat <<EOF >build
#!/bin/bash -e

ORG_PATH="github.com/coreos"
REPO_PATH="\${ORG_PATH}/flannel"

if [ ! -h gopath/src/\${REPO_PATH} ]; then
        mkdir -p gopath/src/\${ORG_PATH}
        ln -s ../../../.. gopath/src/\${REPO_PATH} || exit 255
fi

export GOBIN=\${PWD}/bin
export GOPATH=\${PWD}/gopath

eval \$(go env)

if [ \${GOOS} = "linux" ]; then
        echo "Building flanneld..."
        go build -o \${GOBIN}/flanneld \${REPO_PATH}
else
        echo "Not on Linux - skipping flanneld build"
fi
EOF
fi

chmod 755 build
./build

mkdir -p "${ANSIBLE_HOME}/roles/flannel/files"
cp bin/flanneld "${ANSIBLE_HOME}/roles/flannel/files"



## flannel.service 파일
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/flanneld.service.j2
[Unit]
Description=flanneld Service
#After=etcd.service
#Requires=etcd.service

[Service]
EnvironmentFile=/etc/default/flanneld
PermissionsStartOnly=true
User=root
ExecStart=/usr/bin/flanneld -etcd-endpoints=${FLANNEL_ETCD} $FLANNEL_OPTIONS
Restart=always
RestartSec=10s
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
Alias=flanneld.service


## flannel config 파일
$ vi roles/flannel/templates/flanneld.j2
FLANNEL_ETCD="http://{{ groups['kube-masters'][0] }}:4001"
FLANNEL_OPTIONS="-v 0"


## flannel 과 연계된 docker 설정 파일
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/docker-config.sh.j2
#! /bin/bash

ip link set dev docker0 down
brctl delbr docker0

source /run/flannel/subnet.env

echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 \
-H unix:///var/run/docker.sock \
--bip=${FLANNEL_SUBNET} \
--mtu=${FLANNEL_MTU}\" > /etc/default/docker



## flannel ansible 설치 로직
$ mkdir -p roles/flannel/tasks && vi roles/flannel/tasks/main.yml
---
# Flannel installation: register the overlay network config in etcd,
# build flanneld on kube-deploy, then install and start it on the nodes.

# set flannel data into etcd — only needs to run once, so pin to kube-master01
- name: copy config-flannel
  template: src=config-flannel.sh.j2 dest=~/config-flannel.sh mode=0755
  when: inventory_hostname == 'kube-master01'

- name: run config-flannel
  command: ~/config-flannel.sh
  when: inventory_hostname == 'kube-master01'

- name: remove config-flannel
  file: name=~/config-flannel.sh state=absent
  when: inventory_hostname == 'kube-master01'

# flannel download, build, install (runs as the deploy user on kube-deploy)
- name: copy download-flannel
  template: src=download-flannel.sh.j2 dest=~/download-flannel.sh
        owner={{ add_user }} mode=0755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

# BUGFIX: the script takes no arguments; the original passed a meaningless
# 'owner=...' argument that the command module forwarded to the script.
- name: run download-flannel
  command: ~/download-flannel.sh
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-flannel
  file: name=~/download-flannel.sh state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: copy flanneld
  copy: src=flanneld dest=/usr/bin/flanneld owner=root mode=0755
  when: "'kube-nodes' in group_names"

- name: copy flanneld.service
  template: src=flanneld.service.j2 dest=/lib/systemd/system/flanneld.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

# BUGFIX: each node only has its own interface, so look it up via
# inventory_hostname. The original looped over every node's interface name
# on every node and tried to resize interfaces that do not exist locally.
- name: resize MTU
  command: ip link set dev {{ hostvars[inventory_hostname][inventory_hostname + '-iface'] }} mtu {{ mtu_size }}
  when: "'kube-nodes' in group_names"

- name: copy flanneld config
  template: src=flanneld.j2 dest=/etc/default/flanneld
  when: "'kube-nodes' in group_names"

- name: reload systemd
  shell: systemctl daemon-reload
  when: "'kube-nodes' in group_names"

# The notify triggers the handler chain that reconfigures docker with the
# flannel subnet (see roles/flannel/handlers/main.yml).
- name: restart flannel
  service: name=flanneld state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart flannel
  notify:
    - restart flannel



## handler 는 task 의 notify 로 호출됨
$ mkdir -p roles/flannel/handlers && vi roles/flannel/handlers/main.yml
---
# Handlers triggered by the 'restart flannel' notify in roles/flannel/tasks.
# The first handler chains the remaining ones so that docker is restarted
# with the flannel subnet/MTU values after flanneld comes up.
- name: restart flannel
  service: name=flanneld state=restarted
  notify:
    - stop docker
    - delete docker0
    - copy docker-config
    - run docker-config
    - remove docker-config
    - start docker
  when: "'kube-nodes' in group_names"

# Docker must be stopped before its bridge can be replaced.
- name: stop docker
  service: name=docker state=stopped
  when: "'kube-nodes' in group_names"

# docker0 may already be gone; ignore failures.
- name: delete docker0
  command: ip link delete docker0
  ignore_errors: yes
  when: "'kube-nodes' in group_names"

- name: copy docker-config
  template: src=docker-config.sh.j2 dest=~/docker-config.sh mode=755
  when: "'kube-nodes' in group_names"

# The script sources /run/flannel/subnet.env and rewrites
# /etc/default/docker with --bip/--mtu from flannel.
- name: run docker-config
  command: ~/docker-config.sh
  ignore_errors: true
  when: "'kube-nodes' in group_names"

- name: remove docker-config
  file: name=~/docker-config.sh state=absent
  when: "'kube-nodes' in group_names"

- name: start docker
  service: name=docker state=started
  when: "'kube-nodes' in group_names"



$ ansible-playbook -i hosts 03-flannel.yml 
 






###################################
## k8s 소스 다운로드 및 make  (ansible)
###################################
## cert 파일 만들기
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-cert.sh
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh

## kubernetes 다운로드 및 설치
$ vi 04-kubernetes.yml
---
- name: Setup kubernetes
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: "{{add_user}}"
  become: true
  become_user: root

  roles:
    - kubernetes



## k8s 을 다운 받아서 build 하는 스크립트 (GOPATH 와 PATH 가 중요)
$ mkdir -p roles/kubernetes/templates && vi roles/kubernetes/templates/download-kubernetes.sh.j2
#!/bin/bash

GO_HOME=${HOME}/{{ gopath_dir }}
KUBE_HOME=${GO_HOME}/src/k8s.io/kubernetes
KUBE_VERSION="{{ kube_version }}"
ANSIBLE_HOME=${HOME}/{{ ansible_dir }}

export GOPATH=${GO_HOME}:${KUBE_HOME}/Godeps/_workspace
export PATH=${GO_HOME}/bin:$PATH

function chdir() {
    cd $1
}

if [ ! -d ${KUBE_HOME} ]; then
    mkdir -p ${GO_HOME}/src/k8s.io
    chdir ${GO_HOME}/src/k8s.io
    go get -u github.com/jteeuwen/go-bindata/go-bindata
    git clone https://github.com/kubernetes/kubernetes.git
    chdir ${KUBE_HOME}
    git checkout -b ${KUBE_VERSION} origin/${KUBE_VERSION}
fi

chdir ${KUBE_HOME}
if [ ! -d ${KUBE_HOME}/_output ]; then
    make all
fi

mkdir -p ${ANSIBLE_HOME}/roles/kubernetes/files
cp _output/local/bin/linux/amd64/kube* ${ANSIBLE_HOME}/roles/kubernetes/files


## Kubernetes config 파일
$ vi roles/kubernetes/templates/kube-apiserver.conf.j2
KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=http://127.0.0.1:4001 \
--logtostderr=true \
--service-cluster-ip-range={{ service_cluster_ip_range }} \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,\
ResourceQuota,DenyEscalatingExec,SecurityContextDeny \
--service-node-port-range={{ service_node_port_range }} \
--client-ca-file=/srv/kubernetes/ca.crt \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key"


$ vi roles/kubernetes/templates/kube-apiserver.service.j2
[Unit]
Description=Kubernetes API Server
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-apiserver
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


$ vi roles/kubernetes/templates/kube-controller-manager.conf.j2
KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
--root-ca-file=/srv/kubernetes/ca.crt \
--service-account-private-key-file=/srv/kubernetes/server.key \
--logtostderr=true"


$ vi roles/kubernetes/templates/kube-controller-manager.service.j2
[Unit]
Description=Kubernetes Controller Manager
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



$ vi roles/kubernetes/templates/kube-scheduler.conf.j2
KUBE_SCHEDULER_OPTS="--logtostderr=true \
--master=127.0.0.1:8080"


$ vi roles/kubernetes/templates/kube-scheduler.service.j2
[Unit]
Description=Kubernetes Scheduler
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-scheduler
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



$ vi roles/kubernetes/templates/kubelet.conf.j2
KUBELET_OPTS="--address=0.0.0.0 \
--port=10250 \
--hostname-override={{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }} \
--api-servers=http://{{ kube_api_ip }}:8080 \
--logtostderr=true \
--cluster-dns={{ cluster_dns }} \
--cluster-domain={{ cluster_domain }}"


$ vi roles/kubernetes/templates/kubelet.service.j2
[Unit]
Description=Kubernetes Kubelet
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kubelet
ExecStart=/usr/local/bin/kubelet $KUBELET_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


$ vi roles/kubernetes/templates/kube-proxy.conf.j2
KUBE_PROXY_OPTS="--master=http://{{ kube_api_ip }}:8080 --logtostderr=true"


$ vi roles/kubernetes/templates/kube-proxy.service.j2
[Unit]
Description=Kubernetes Proxy Server
After=syslog.target network.target

[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-proxy
ExecStart=/usr/local/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target



## kubernetes ansible 설치 로직
$ mkdir -p roles/kubernetes/tasks && vi roles/kubernetes/tasks/main.yml
---
# Kubernetes installation: build the binaries on kube-deploy, generate the
# cluster certificates, then distribute binaries/configs/unit files to the
# masters and nodes and (re)start the services.

- name: copy download-kubernetes.sh
  template: src=download-kubernetes.sh.j2 dest=~/download-kubernetes.sh
        owner={{ add_user }} mode=0755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

# BUGFIX: the script takes no arguments; the original passed a meaningless
# 'owner=...' argument that the command module forwarded to the script.
- name: run download-kubernetes.sh
  command: ~/download-kubernetes.sh
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-kubernetes.sh
  file: name=~/download-kubernetes.sh state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: add kube-cert group
  group: name=kube-cert state=present

# make-cert.sh / make-ca-cert.sh come from the kubernetes source tree
# checked out by download-kubernetes.sh on kube-deploy.
- name: make cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-cert.sh
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: make ca-cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-ca-cert.sh {{ kube_api_ip }}
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: change mod cert
  file:
    path: /srv/kubernetes
    mode: 0755
    recurse: yes
  when: "'kube-deploy' in group_names"

- name: create cert directory
  file: path=/srv/kubernetes state=directory owner=root mode=0755

# src=/srv/kubernetes/... is read from the control node (kube-deploy),
# where the make-cert scripts wrote the files.
- name: copy server.cert
  copy: src=/srv/kubernetes/server.cert dest=/srv/kubernetes/server.cert
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy server.key
  copy: src=/srv/kubernetes/server.key dest=/srv/kubernetes/server.key
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy ca.crt
  copy: src=/srv/kubernetes/ca.crt dest=/srv/kubernetes/ca.crt
        owner=root mode=0600
  become: yes
  become_user: root

# Master binaries --------------------------------------------------------
- name: copy kubectl
  copy: src=kubectl dest=/usr/local/bin/kubectl
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver
  copy: src=kube-apiserver dest=/usr/local/bin/kube-apiserver
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager
  copy: src=kube-controller-manager dest=/usr/local/bin/kube-controller-manager
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler
  copy: src=kube-scheduler dest=/usr/local/bin/kube-scheduler
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

# Node binaries ----------------------------------------------------------
- name: copy kubelet
  copy: src=kubelet dest=/usr/local/bin/kubelet
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy
  copy: src=kube-proxy dest=/usr/local/bin/kube-proxy
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

# Config files and systemd units ----------------------------------------
- name: copy kube-apiserver config
  template: src=kube-apiserver.conf.j2 dest=/etc/default/kube-apiserver
        owner={{ add_user }} mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver.service
  template: src=kube-apiserver.service.j2 dest=/lib/systemd/system/kube-apiserver.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager config
  template: src=kube-controller-manager.conf.j2 dest=/etc/default/kube-controller-manager
        owner={{ add_user }} mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager.service
  template: src=kube-controller-manager.service.j2 dest=/lib/systemd/system/kube-controller-manager.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler config
  template: src=kube-scheduler.conf.j2 dest=/etc/default/kube-scheduler
        owner={{ add_user }} mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler.service
  template: src=kube-scheduler.service.j2 dest=/lib/systemd/system/kube-scheduler.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kubelet config
  template: src=kubelet.conf.j2 dest=/etc/default/kubelet
        owner={{ add_user }} mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kubelet.service
  template: src=kubelet.service.j2 dest=/lib/systemd/system/kubelet.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy config
  template: src=kube-proxy.conf.j2 dest=/etc/default/kube-proxy
        owner={{ add_user }} mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy.service
  template: src=kube-proxy.service.j2 dest=/lib/systemd/system/kube-proxy.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

# BUGFIX: unit files are installed on both masters and nodes, so systemd
# must be reloaded on both before the services are (re)started. The
# original reloaded only on masters, leaving kubelet/kube-proxy units on
# the nodes stale.
- name: reload systemd
  shell: systemctl daemon-reload
  when: "('kube-masters' in group_names) or ('kube-nodes' in group_names)"

- name: restart kube-apiserver
  service: name=kube-apiserver state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-apiserver

- name: restart kube-controller-manager
  service: name=kube-controller-manager state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-controller-manager

- name: restart kube-scheduler
  service: name=kube-scheduler state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-scheduler

- name: restart kubelet
  service: name=kubelet state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kubelet

- name: restart kube-proxy
  service: name=kube-proxy state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kube-proxy




ansible-playbook -i hosts 04-kubernetes.yml






















반응형
Posted by seungkyua@gmail.com
,
반응형

1. 다운로드

https://golang.org/doc/install?download=go1.5.2.darwin-amd64.tar.gz     # Mac

https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz           # Linux


$ sudo tar -C /usr/local -xzf go1.5.2.darwin-amd64.tar.gz

$ cd /usr/local

$ sudo chown -R root go


2. 환경 변수

sudo vi /etc/profile

export GOROOT=/usr/local/go                                 # go 설치 위치

export PATH=$PATH:/usr/local/go/bin                      # go 실행파일 위치


$ cd Documents

mkdir -p go_workspace{,/bin,/pkg,/src}


vi .bash_profile 

export GOPATH=$HOME/Documents/go_workspace                     # go workspace 위치

export PATH=$HOME/Documents/go_workspace/bin:$PATH         # go 실행파일 위치



## go tool 다운로드

$ go get golang.org/x/tools/cmd/...



3. go 샘플 다운로드

go get github.com/GoesToEleven/GolangTraining


# kubernetes 소스 다운로드

$ go get k8s.io/kubernetes       # 이렇게 하면 git clone https://github.com/kubernetes/kubernetes


4. Go Workspace 디렉토리 위치

- bin

- pkg

- src - github.com - GoesToEleven - GolangTraining



5. editor WebStorm 다운로드 및 세팅

https://www.jetbrains.com/webstorm/download/

버전 : WebStorm-11.0.3-custom-jdk-bundled.dmg



6. golang plugin 설치

https://plugins.jetbrains.com/plugin/5047?pr=idea

버전 : Go-0.10.749.zip


# Project Open

/Users/ahnsk/Documents/go_workspace/src/github.com/GoesToEleven/GolangTraining


# Preferences 세팅

Go SDK : /usr/local/go

Go Libraries : go_worksapce/src



7. theme 다운로드 및 설정

http://color-themes.com/?view=index

Sublime Text 2.jar 다운로드


File >> import Settings 에서 Sublime Text 2.jar 선택


# Preferences 세팅

Editor -> Colors & Fonts : Scheme을 Sublime Text2로 설정



8. JavaScript Debug 를 위한 live edit plugin 설치

https://plugins.jetbrains.com/plugin/7007?pr=pycharm

LiveEdit.jar 다운로드


# Preferences 세팅

Build, Execution, Deployment -> Debugger -> Live Edit

체크 : Highlight current....

Update Auto in (ms):   16 


# 우측 상단 돋보기 클릭하여 Edit Configuration 조회

창에서 좌측 상단 + 클릭 후 JavaScript Debug 추가


# chrom 웹 스토어에서 확장 프로그램 설치

JetBrains IDE Support



# WebStorm 단축키

파일찾기 : Command + Shift + O

단어찾기 : Command + Shift + F

실행       : Crtl + Alt + R

디버그    : Ctrl + Alt + D

줄삭제    : Command + Backspace

줄복사    : Command + D

포맷       : Command + Alt + L



# go file 규칙 테스트

$ gofmt -s -w file.go


$ git rebase -i   혹은    git push -f   로 작업의 논리적인 유닛으로 커밋



# Docker contribute 시에 DCO (Developer Certificate of Origin) 설정

# commit 마다 설정

Docker-DCO-1.1-Signed-off-by: Seungkyu Ahn <seungkyua@gmail.com> (github: seungkyua)



# 혹은 hook 를 설정

$ cd docker

$ curl -o .git/hooks/prepare-commit-msg \

https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook

$ chmod +x .git/hooks/prepare-commit-msg



# github user 를 세팅

$ git config --global github.user seungkyua



# Channel

# deadlock 을 막을려면 채널로 값을 보내는 쪽에서 close 채널을 해야 한다.

# 채널을 받는 쪽에서는 defer sync.WaitGroup.Done() 을 한다.

# 혹은 새로운 go 루틴을 만들고 sync.WaitGroup.Wait() 으로 끝나길 기달려서 close 채널을 한다.




# 문서 보기

## 문법 에러 검사

$ go vet wordcount.go


## tar 패키지 사용법 보기

$ go doc tar


## 로컬 문서 서버 띄우기

$ godoc -http=:6060



# godep 설치

go get github.com/tools/godep

$ cd ~/Documents/go_workspace/src/github.com/tools/godep

$ go install


## godep 을 사용하는 프로젝트로 이동

$ cd ~/Documents/go_workspace/src/k8s.io/kubernetes/


## godep get 으로 Godeps/_workspace 에 패키지를 다운한다.

## _workspace 는 deprecated 예정

godep get 패키지명


 







반응형
Posted by seungkyua@gmail.com
,

유용한 Site

프로그래밍 2015. 12. 22. 13:12
반응형

1. Kubernates

    구글 논문 : https://research.google.com/pubs/pub43438.html

    구글 발표 : https://speakerdeck.com/jbeda/containers-at-scale











슬라이드 작성

http://prezi.com



















반응형
Posted by seungkyua@gmail.com
,

AngularJS 설치

프로그래밍 2015. 7. 26. 21:53
반응형

1. Mac 에서 brew 를 통한 nodejs, npm 설치

http://brew.sh

$ brew install npm                   # npm 을 설치하면 이펜던시에 의해서 nodejs 도 설치됨


2. Ubuntu 에서 nodejs 설치

http://nodejs.org

$ sudo apt-get install g++


$ ./configure

$ make

$ sudo make install


3. npm 업그레이드

$ sudo npm install -g npm           # /usr/local/lib/node_modules/npm


4. bower 설치  (패키지 매니저 for Web) 및 활용

http://bower.io

$ sudo npm install -g bower


$ bower install jquery                                          # registered package

$ bower install desandro/masonry                         # GitHub shorthand
$ bower install git://github.com/user/package.git   # Git endpoint

$ bower install http://example.com/script.js           # URL


$ bower install angular         # 하위 디렉토리 bower_components/angular 만들고 다운로드


$ vi .bowerrc

{

  "directory": "WebContent/bower"

}

$ bower install angular         # 하위 디렉토리 WebContent/bower/angular 만들고 다운로드


5. eclipse 로 프로젝트 생성 (Project Type : Dynamic Web Project)





















반응형
Posted by seungkyua@gmail.com
,
반응형

1. git 설치하기

# apt-get install git-core git-review

# adduser gerrit

# mkdir -p /git_repo

# chown -R gerrit.gerrit /git_repo

# sudo mkdir -p /git_review

chown -R gerrit.gerrit /git_review

# git init --bare /git_repo/paas.git


2. gerrit 다운로드

https://gerrit-releases.storage.googleapis.com/index.html


3. mysql 설치

# mysql -uroot -p

mysql> CREATE USER 'gerrit'@'localhost' IDENTIFIED BY 'secret';

mysql> CREATE DATABASE reviewdb;

mysql> ALTER DATABASE reviewdb charset=utf8;

mysql> GRANT ALL ON reviewdb.* TO 'gerrit'@'localhost';

mysql> FLUSH PRIVILEGES;



4. apache2 설치

$ sudo apt-get install apache2 apache2-utils libapache2-mod-proxy-html libxml2-dev

$ sudo a2enmod proxy_http

$ sudo a2enmod proxy

$ sudo service apache2 restart


# sudo vi /etc/apache2/sites-available/gerrit.conf

<VirtualHost *:8080>

  ServerName localhost

  ProxyRequests Off

  ProxyVia Off

  ProxyPreserveHost On


  <Proxy *>

    Order deny,allow

    Allow from all

  </Proxy>


  <Location /login/>

    AuthType Basic

    AuthName "Gerrit Code Review"

    Require valid-user

    AuthUserFile /git_review/etc/passwords

  </Location>


  AllowEncodedSlashes On

  ProxyPass / http://127.0.0.1:8081/

  ProxyPassReverse / http://127.0.0.1:8081/                #외부 SSO 검증에 기반한 HTTP 인증

#  RequestHeader set REMOTE-USER %{REMOTE_USER} #외부 SSO 검증에 기반한 HTTP 인증

</VirtualHost>


$ cd /etc/apache2/sites-available

$ sudo a2ensite gerrit.conf

$ sudo vi /etc/apache2/ports.conf

Listen 8080


$ sudo service apache2 restart




5. gerrit site 설치

# apt-get install openjdk-7-jdk


# oracle java 를 설치하는 방법

# add-apt-repository ppa:webupd8team/java

# apt-get update

# apt-get install oracle-java7-installer



# su - gerrit

$ cd /git_review

$ cp /home/stack/Downloads/gerrit-2.11.3.war .

$ java -jar gerrit-2.11.3.war init -d /git_review

 *** Git Repositories

*** 


Location of Git repositories   [git]: /git_repo


*** SQL Database

*** 


Database server type           [h2]: mysql


Gerrit Code Review is not shipped with MySQL Connector/J 5.1.21

**  This library is required for your configuration. **

Download and install it now [Y/n]?

Downloading http://repo2.maven.org/maven2/mysql/mysql-connector-java/5.1.21/mysql-connector-java-5.1.21.jar ... OK

Checksum mysql-connector-java-5.1.21.jar OK

Server hostname                [localhost]: 

Server port                    [(mysql default)]: 

Database name                  [reviewdb]: 

Database username              [gerrit]:

gerrit2's password            : secret


*** Index

*** 


Type                           [LUCENE/?]: 


The index must be rebuilt before starting Gerrit:

  java -jar gerrit.war reindex -d site_path


*** User Authentication

*** 


Authentication method          [OPENID/?]: http

# Get username from custom HTTP header [y/N]? y                    # 외부 SSO HTTP 인증시

# Username HTTP Header [SM_USER]: REMOTE_USER_RETURN    # 외부 SSO HTTP 인증시

SSO logout URL  : http://aa:aa@192.168.75.141:8080/


*** Review Labels

*** 


Install Verified label         [y/N]? 


*** Email Delivery

*** 


SMTP server hostname       [localhost]: smtp.gmail.com

SMTP server port               [(default)]: 465

SMTP encryption                [NONE/?]: SSL

SMTP username                 [gerrit]: skanddh@gmail.com


*** Container Process

*** 


Run as                         [gerrit]: 

Java runtime                   [/usr/local/jdk1.8.0_31/jre]: 

Copy gerrit-2.11.3.war to /git_review/bin/gerrit.war [Y/n]? 

Copying gerrit-2.11.3.war to /git_review/bin/gerrit.war


*** SSH Daemon

*** 


Listen on address              [*]: 

Listen on port                 [29418]: 


Gerrit Code Review is not shipped with Bouncy Castle Crypto SSL v151

  If available, Gerrit can take advantage of features

  in the library, but will also function without it.

Download and install it now [Y/n]? N


*** HTTP Daemon

*** 


Behind reverse proxy           [y/N]? y

Proxy uses SSL (https://)      [y/N]? 

Subdirectory on proxy server   [/]: 

Listen on address              [*]: 127.0.0.1        # reverse 이기 때문에

Listen on port                 [8081]: 

Canonical URL                  [http://127.0.0.1/]:


java -jar bin/gerrit.war reindex -d /git_review


htpasswd -c /git_review/etc/passwords skanddh

# service apache2 restart



6. start/stop Daemon

$ /git_review/bin/gerrit.sh restart

$ /git_review/bin/gerrit.sh start

$ /git_review/bin/gerrit.sh stop


$ sudo ln -snf /git_review/bin/gerrit.sh /etc/init.d/gerrit.sh

$ sudo ln -snf /etc/init.d/gerrit.sh /etc/rc3.d/S90gerrit



[ HTTPS 활성화 ]

$ vi gerrit.conf

[httpd]

         listenUrl = proxy-https://127.0.0.1:8081/


$ vi /etc/httpd/conf/httpd.conf

LoadModule ssl_module modules/mod_ssl.so

LoadModule proxy_module modules/mod_proxy.so

<VirtualHost _default_:443>

SSLEngine on

SSLProtocol all -SSLv2

SSLCipherSuite ALL:!ADH:!EXPORT:!SSLv2:RC4+RSA:+HIGH:+MEDIUM:+LOW

SSLCertificateFile /etc/pki/tls/certs/server.crt

SSLCertificateKeyFile /etc/pki/tls/private/server.key

SSLCertificateChainFile /etc/pki/tls/certs/server-chain.crt

ProxyPass / http://127.0.0.1:8081/

ProxyPassReverse / http://127.0.0.1:8081/

</VirtualHost>


인증서 생성

$ sudo mkdir -p /etc/pki/tls/private

$ sudo mkdir -p /etc/pki/tls/certs

$ sudo openssl req -x509 -days 3650 \

-nodes -newkey rsa:2048 \

-keyout /etc/pki/tls/private/server.key -keyform pem \

-out /etc/pki/tls/certs/server.crt -outform pem



-----

Country Name (2 letter code) [AU]:KR

State or Province Name (full name) [Some-State]:Seoul

Locality Name (eg, city) []:Seoul

Organization Name (eg, company) [Internet Widgits Pty Ltd]:MyCompany

Organizational Unit Name (eg, section) []:

Common Name (e.g. server FQDN or YOUR name) []:myhost.mycompany.com

Email Address []:admin@myhost.mycompany.com

$ cd /etc/pki/tls/certs

sudo cp server.crt server-chain.crt



user.email 과 user.name 등록

$ git config user.name "Seungkyu Ahn"

$ git config user.email "skanddh@gmail.com"


password 등록

git config credential.helper cache                             # default 15분 저장

$ git config credential.helper 'cache --timeout=3600'      # 1시간 저장


커밋 메세지 hook 설치

curl -Lo .git/hooks/commit-msg http://localhost:8080/tools/hooks/commit-msg

$ chmod +x .git/hooks/commit-msg


review (gerrit remote url 등록)

git remote add gerrit http://localhost:8080/hello-project


# 서버 프로젝트에 미리 등록해서 clone 시 다운받을 수 있도록 함

$ vi .gitreview


[gerrit]

host=localhost

port=8080

project=hello-project

defaultbranch=master


$ git checkout -b bug/1

수정1

$ git add

$ git commit

$ git review

수정2

$ git add

$ git commit --amend

$ git review



review (직접하는 방법)

$ git checkout -b bug/1

수정1

$ git add

$ git commit

git push origin HEAD:refs/for/master%topic=bug/1



[ Jenkins 설치 ]

jenkins tomcat 의 webapps 디렉토리에 다운로드

# adduser jenkins

# chown -R jenkins.jenkins apache-tomcat-8.0.26

# su - jenkins


http://jenkins-ci.org/

$ cd /usr/local/apache-tomcat-8.0.26/webapps

$ wget http://updates.jenkins-ci.org/download/war/1.580.1/jenkins.war

wget http://mirrors.jenkins-ci.org/war/latest/jenkins.war                       # 최신 버전


tomcat 포트 및 URIEndoing 변경

$ vi /usr/local/apache-tomcat-8.0.26/conf/server.xml


<Connector port="7070" protocol="HTTP/1.1"

           connectionTimeout="20000"

           redirectPort="8443"

           URIEncoding="UTF-8" />


/usr/local/apache-tomcat-8.0.26/bin/startup.sh


jenkins 접속

http://192.168.75.141:7070/jenkins/


웹화면에서 보안 설정

(좌측메뉴) Jenkins 관리

Configure Global Security

  - Enable security

  - Security Realm : Jenkins’ own user database

  - Authorization : Matrix-based security

  - User/group to add: admin


저장 후 admin 계정으로 가입



[ Jenkins 연동 ]

1. 젠킨스 플러그인 설치

1. Jenkins Git Client plugin

2. Jenkins Git Plugin : 젠킨스와 깃을 연동

3. Jenkins Gerrit Trigger plugin : 게릿 변경시 패치 세트를 가져와 빌드하고 점수를 남김

4. Hudson Gerrit plugin : 깃 플러그인 설정을 가능


2. 게릿 트리거 플러그인

1. HTTP/S Canonical URL: 게릿의 변경 및 패치 세트를 가리키는 URL

2. SSH 접속 : 게릿에 연결하여 게릿으로부터의 이벤트를 감지


jenkins를 띄운 사용자로 ssh 키 생성 및 게릿에 젠킨스가 사용할 배치 사용자를 생성

jenkins 계정으로 jenkins 를 실행하면 아래 내부 사용자 생성이 필요없음

사용자가 다르면 게릿의 관리자 계정으로 create-account 명령을 실행해서 내부 사용자를 생성

$ skanddh 계정으로 로그인

ssh-keygen -t rsa

ssh -p 29418 skanddh@192.168.75.141


# skanddh 가 gerrit 의 관리자 계정이어야 하며 skanddh 계정으로 실행

$ sudo cat /home/jenkins/.ssh/id_rsa.pub | \

ssh -p 29418 skanddh@192.168.75.141 gerrit create-account \

--group "'Non-Interactive Users'" --full-name Jenkins \

--email jenkins@localhost.com --ssh-key - jenkins


All-Projects에 있는 Non-Interactive Users 그룹에 아래의 권한이 있는지 확인

1. Stream events 권한이 있으면 게릿의 변경 발생을 원격으로 감지

2. refs/* 에 Read 권한이 있으면 gerrit 저장소의 변경 사항을 읽고 clone 가능

3. refs/heads/* 에 대한 Label Code-Review(Verified) -1..+1 권한이 잇으면 변경에 대해 점수 부여 가능


게릿 트리거 플러그인 설정

jenkins url 접속

http://192.168.75.141:7070/jenkins/gerrit-trigger


1. URL 과 SSH 연결을 설정

    Name : Gerrit

    Hostname : 192.168.75.141

    Frontend URL : http://192.168.75.141:8080

    SSH Port : 29418

    Username : jenkins

    E-mail : jenkins@localhost.com

    SSH Keyfile : /home/jenkins/.ssh/id_rsa

    SSH Keyfile Password :


2. Test Connection 으로 테스트

3. 설정 페이지 맨 아래 Start/Stop 버튼으로 젠킨스 재시작


jenkins url 접속

http://192.168.75.141:7070/jenkins/gerrit_manual_trigger

Query 입력 란에 status:open 입력 -> Search 버튼 클릭

http://192.168.75.141:8080/#q/status:open,n,z 페이지에서 리뷰 대기 중인 변경 확인


게릿 트리거 설정

게릿 트리거 실행 조건을 SCM polling(또는 다른 트리거 정책)에서 Gerrit Event 로 변경

게릿 트리거 설정 부분에서 Advanced 버튼으로 게릿 조건을 지정


깃 플러그인 설정 (Hudson Gerrit plugin 을 설치해야 나옴)

깃 플러그인에서 게릿의 ref-spec 다음에 추가

Advanced 버튼 클릭 하여 깃 저장소 설정 변경

1. $GERRIT_REFSPEC 을 복제해야 할 깃 refspec 으로 지정

2. $GERRIT_PATCHSET_REVISION을 빌드할 깃 브랜치로 지정

3. 트리거 방식을 Gerrit trigger로 지정


아래 두가지 활성화

1. Wipe out workspace : 작업 공간 비우기

2. Use shallow clone : 얕은 복제 사용





반응형
Posted by seungkyua@gmail.com
,
반응형

1. apt-get 을 위한 키 다운로드

$ wget -qO - http://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -


2. repositoriy 등록

vi /etc/apt/sources.list


deb http://packages.elasticsearch.org/elasticsearch/1.4/debian stable main


3. install

apt-get update && apt-get install elasticsearch


4. 설치 위치

- binary 설치 위치

/usr/share/elasticsearch


- 구성파일

/etc/elasticsearch/elasticsearch.yml


- init 스크립트

/etc/init.d/elasticsearch


- 환경설정 파일

/etc/default/elasticsearch




반응형
Posted by seungkyua@gmail.com
,
반응형

1. Android Studio 를 다운받는다.

http://developer.android.com/sdk/installing/studio.html


2. Android 버전 플랫폼 정보

http://developer.android.com/about/dashboards/index.html


3. Android Action Bar 정보

http://developer.android.com/guide/topics/ui/actionbar.html


4. Android Virtual Device Manager (AVD Manager)

4.0" WVGA (Nexus S) (480 x 800: xhdpi) : Android 2.2 - API Level 8

Galaxy Nexus (4.65", 720 x 1280: xhdpi) : Android 4.0 - API Level 14

Nexus One (3.7", 480 x 800: hdpi) : Android 2.1 - API Level 7


5.AVD Manager 화면을 90도로 돌리기

ctrl + F11







반응형
Posted by seungkyua@gmail.com
,
반응형

사전에 hadoop 을 먼저 설치합니다.

http://www.ahnseungkyu.com/150


1. git source 받기

$ git clone https://git-wip-us.apache.org/repos/asf/tajo.git tajo


2. ubuntu 12.04 LTS 에서 git 으로 apache https 접속 에러시 해결

error: gnutls_handshake() failed: A TLS packet with unexpected length was received. while accessing https://git-wip-us.apache.org/repos/asf/tajo.git/info/refs

fatal: HTTP request failed


$ sudo apt-get install build-essential fakeroot dpkg-dev

$ mkdir ~/git-openssl

$ cd ~/git-openssl

$ sudo apt-get source git

$ sudo apt-get build-dep git

$ sudo apt-get install libcurl4-openssl-dev

$ sudo dpkg-source -x git_1.7.9.5-1.dsc

$ cd git-1.7.9.5

$ sudo vi debian/control


:%s/libcurl4-gnutls-dev/libcurl4-openssl-dev/g              # 참조 파일을 변경


$ sudo dpkg-buildpackage -rfakeroot -b


# 테스트 에러가 발생하면debian/rules 파일에서 Test 삭제

$ sudo vi debian/rules

TEST=test                # 해당 라인 삭제


sudo dpkg -i ../git_1.7.9.5-1_amd64.deb


3. Tajo 소스 빌드

$ cd tajo

$ mvn clean package -DskipTests -Pdist -Dtar


4. Tajo 바이너리 설치 (현재 버전은 0.9.0 임)

$ cd

$ tar xzvf /home/stack/Git/tajo/tajo-dist/target/tajo-0.9.0-SNAPSHOT.tar.gz


5. tajo-env.sh 설정

$ cd tajo-0.9.0-SNAPSHOT

$ vi conf/tajo-env.sh


export HADOOP_HOME=/home/stack/hadoop-2.4.0

export JAVA_HOME=/usr/local/jdk1.7.0_51


6. tajo 실행

$ cd bin

$ ./start-tajo.sh


7. 테스트

$ mkdir -p table1

$ cd table1

$ cat > data.csv

1|abc|1.1|a

2|def|2.3|b

3|ghi|3.4|c

4|jkl|4.5|d

5|mno|5.6|e

<CTRL + D>


# hadoop fs 에 올리기

$ hadoop fs -moveFromLocal data.csv /

$ hadoop fs -ls /

Found 1 items 

-rw-r--r--   3 stack supergroup         60 2014-06-05 17:32 /data.csv


# tajo 로 검색하기

$ cd ../bin

$ ./tsql


# 로컬파일로 테이블 생성하기

default> create external table table1 (id int, name text, score float, type text) using csv with ('csvfile.delimiter'='|') location 'file:/home/stack/tajo-0.9.0-SNAPSHOT/table1';


# hdfs 로 테이블 생성하기

default> create external table hdfs_table1 (id int, name text, score float, type text) using csv with ('csvfile.delimiter'='|') location 'hdfs://localhost:9000/data.csv';


default> \d table1

default> select * from hdfs_table1 where id > 2;



반응형
Posted by seungkyua@gmail.com
,
반응형

ubuntu 12.04 LTS 기반으로 설치


1. Java 설치

    http://www.ahnseungkyu.com/139


2. 패키지 설치

$ sudo apt-get install build-essential maven cmake libssl-dev


3. proxy 를 사용한다면 다음을 수정

$ vi /home/stack/.m2/settings.xml


<settings>

  <proxies>

    <proxy>

      <active>true</active>

      <protocol>http</protocol>

      <host>xx.xx.xx.xx</host>

      <port>8080</port>

      <nonProxyHosts>localhost|127.0.0.1|192.168.75.136|192.168.230.136|ubuntu</nonProxyHosts>

...


$ cd /Hadoop-src/hadoop-2.4.0-src/hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads

$ wget http://archive.apache.org/dist/tomcat/tomcat-6/v6.0.36/bin/apache-tomcat-6.0.36.tar.gz


$ keytool -v -alias mavensrv -import \

> -file /usr/share/ca-certificates/extra/XXX.crt \

-keystore trust.jks


4. protocol buffer 소스 다운로드, 컴파일, 설치 (2.5 이상을 설치)

$ wget https://protobuf.googlecode.com/files/protobuf-2.5.0.tar.gz

$ tar xvfz protobuf-2.5.0.tar.gz

$ cd protobuf-2.5.0

$ ./configure

$ make

$ sudo make install                              # /usr/local/lib 에 관련 라이브러리가 설치됨

$ sudo ldconfig


5. Hadoop 소스 다운로드 및 패키징

$ wget http://apache.mirror.cdnetworks.com/hadoop/common/hadoop-2.4.0/hadoop-2.4.0-src.tar.gz

$ tar xvfz hadoop-2.4.0-src.tar.gz

$ cd hadoop-2.4.0-src

$ mvn package -Pdist,native -DskipTests -Dtar -X


6. 소스 파일 및 컴파일 된 바이너리 파일 찾기

$ cd ./hadoop-dist/target


$ cp -R ./hadoop-2.4.0/ ~/.


7. 하둡 환경변수 설정

$ vi ~/.bashrc


# Hadoop

export HADOOP_PREFIX="/home/stack/hadoop-2.4.0"

export PATH=$PATH:$HADOOP_PREFIX/bin

export PATH=$PATH:$HADOOP_PREFIX/sbin

export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}

export HADOOP_COMMON_HOME=${HADOOP_PREFIX}

export HADOOP_HDFS_HOME=${HADOOP_PREFIX}

export YARN_HOME=${HADOOP_PREFIX}


# Native Path

export HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_PREFIX}/lib/native

export HADOOP_OPTS="-Djava.library.path=$HADOOP_PREFIX/lib/native"


$ source ~/.bashrc


8. local 에서 ssh 자동 접속 설정

$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys


9. hadoop-env.sh 설정

$ vi $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh


export JAVA_HOME="/usr/local/jdk1.7.0_51"

export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_PREFIX/lib/native

export HADOOP_OPTS="-Djava.library.path=$HADOOP_PREFIX/lib/native"


10. yarn-env.sh 설정

$ vi $HADOOP_PREFIX/etc/hadoop/yarn-env.sh


export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}

export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_PREFIX/lib/native

export HADOOP_OPTS="-Djava.library.path=$HADOOP_PREFIX/lib/native"


11. Hadoop 데이터 및 시스템 파일을 저장할 디렉토리 생성 (hdfs-site.xml, mapred-site.xml 파일 참조)

$ mkdir -p ${HADOOP_PREFIX}/hadoop/dfs/name

$ mkdir -p ${HADOOP_PREFIX}/hadoop/dfs/data

$ mkdir -p ${HADOOP_PREFIX}/hadoop/mapred/system

$ mkdir -p ${HADOOP_PREFIX}/hadoop/mapred/local


12. core-site.xml 설정

$ vi $HADOOP_PREFIX/etc/hadoop/core-site.xml


<configuration>

    <property>

        <name>fs.default.name</name>

        <value>hdfs://localhost:9000</value>

        <final>true</final>

    </property>

</configuration>


13. hdfs-site.xml 설정

$ vi $HADOOP_PREFIX/etc/hadoop/hdfs-site.xml


<configuration>

    <property>

        <name>dfs.namenode.name.dir</name>

        <value>file:/home/stack/hadoop-2.4.0/hadoop/dfs/name</value>

        <final>true</final>

    </property>


    <property>

        <name>dfs.datanode.data.dir</name>

        <value>file:/home/stack/hadoop-2.4.0/hadoop/dfs/data</value>

        <final>true</final>

    </property>


    <property>

        <name>dfs.permissions</name>

        <value>false</value>

    </property>

</configuration>


14. mapred-site.xml 설정

$ cp $HADOOP_PREFIX/etc/hadoop/mapred-site.xml.template $HADOOP_PREFIX/etc/hadoop/mapred-site.xml

$ vi $HADOOP_PREFIX/etc/hadoop/mapred-site.xml


<configuration>

    <property>

        <name>mapreduce.framework.name</name>

        <value>yarn</value>

    </property>


    <property>

        <name>mapred.system.dir</name>

        <value>file:/home/stack/hadoop-2.4.0/hadoop/mapred/system</value>

        <final>true</final>

    </property>


    <property>

        <name>mapred.local.dir</name>

        <value>file:/home/stack/hadoop-2.4.0/hadoop/mapred/local</value>

        <final>true</final>

    </property>

</configuration>


15. yarn-site.xml 설정

$ vi $HADOOP_PREFIX/etc/hadoop/yarn-site.xml


<configuration>


<!-- Site specific YARN configuration properties -->

    <property>

        <name>yarn.nodemanager.aux-services</name>

        <value>mapreduce_shuffle</value>

    </property>


    <property>

        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>

        <value>org.apache.hadoop.mapred.ShuffleHandler</value>

    </property>

</configuration>


16. NameNode 포맷

hdfs namenode -format


17. 데몬 실생

$ cd ${HADOOP_PREFIX}/sbin


# hdfs 데몬 실행

$ start-all.sh


# yarn 데몬 실행

$ start-yarn.sh


18. 하둡 데몬 확인 (Pseudo-Distributed Mode 일 때 5개가 떠 있어야 함)

$ jps


13861 NameNode                             # 네임노드

14347 SecondaryNameNode              # 세컨더리 네임노드

14070 DataNode                               # 데이터 노드

14526 ResourceManager                  # yarn 리소스 매니저 (네임노드)

14745 NodeManager                        # yarn 노드 매니저 (데이터노드)


# yarn Resource Manager 접속

http://localhost:8088


# yarn Node Manager 접속

http://localhost:8042/node


# 네임노드 접속

http://localhost:50070


# 노드 리포트

$ hdfs dfsadmin -report





반응형
Posted by seungkyua@gmail.com
,
반응형

%% 폴더 삭제하기 - rmdir.bat %%

REM rmdir.bat — recursively remove every item matching name %2 under start folder %1.
REM Usage: rmdir.bat <start-folder> <folder-name>
@IF '%1'=='' GOTO USAGE

@IF '%2'=='' GOTO USAGE

REM Walk %1 recursively and RMDIR each path formed from pattern %2.
REM NOTE(review): a digit FOR-variable (%%1) shadows the %1 argument inside the
REM loop body, and FOR /R without /D enumerates file-style patterns — presumably
REM this relies on literal (non-wildcard) names being echoed for every
REM subdirectory; confirm behavior on cmd.exe before reuse.
@FOR /R %1 %%1 IN (%2) DO RMDIR /S /Q "%%1"

@PAUSE 

GOTO END


:USAGE

@ECHO rmrcv [시작폴더위치] [폴더명]


:END



%% 파일 삭제하기 - delfile.bat %%


REM delfile.bat — recursively delete every file matching pattern %2 under start folder %1.
REM Usage: delfile.bat <start-folder> <file-pattern>
@IF '%1'=='' GOTO USAGE

@IF '%2'=='' GOTO USAGE

REM Walk %1 recursively and DEL each file matching %2 without confirmation.
REM NOTE(review): the digit FOR-variable (%%1) shadows the %1 argument inside
REM the loop body — consider a letter variable (e.g. %%F); confirm on cmd.exe.
@FOR /R %1 %%1 IN (%2) DO DEL /S /Q "%%1"

@PAUSE 

GOTO END


:USAGE

@ECHO delrcv[시작폴더위치] [파일명]


:END

반응형
Posted by seungkyua@gmail.com
,