
[ Edit the vmx file to enable VT-x for an Ubuntu guest on VMware for Mac ]

vhv.enable = "TRUE"
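
A minimal sketch of where the line goes, assuming the default VMware Fusion bundle location (the bundle and file names below are illustrative; power the VM off before editing):

vi ~/Documents/Virtual\ Machines.localized/Ubuntu.vmwarevm/Ubuntu.vmx

   vhv.enable = "TRUE"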


[ Install the ssh server ]

sudo apt-get install -y openssh-server


[ Architecture overview ]

Cloud Controller

    - hostname : controller

    - eth0 : 192.168.75.131

    - eth1 : 192.168.230.131

    - installed modules : mysql, rabbitMQ, keystone, glance, nova-api,

                       cinder-api, cinder-scheduler, cinder-volume, open-iscsi, iscsitarget,

                       quantum-server

Network

    - hostname : network

    - eth0 : 192.168.75.132

    - eth1 : 192.168.230.132

    - eth2 : 

    - eth3 : 192.168.75.133

    - installed modules : openvswitch-switch, openvswitch-datapath-dkms,

                       quantum-plugin-openvswitch-agent, dnsmasq, quantum-dhcp-agent, quantum-l3-agent

Compute

    - hostname : compute

    - eth0 : 192.168.75.134

    - eth1 : 192.168.230.134

    - eth2 : 

    - installed modules : openvswitch-switch, openvswitch-datapath-dkms,

                       quantum-plugin-openvswitch-agent, nova-compute-kvm, open-iscsi, iscsitarget


[ Network layout ]

eth0 : public network (NAT)                      192.168.75.0/24

eth1 : private host network, Custom (VMnet2)     192.168.230.0/24

eth2 : VM private network                        10.0.0.0/24

eth3 : VM Quantum public network (NAT)           192.168.75.0/26


[ Change the hostname ]

vi /etc/hosts

192.168.230.131 controller

192.168.230.132 network

192.168.230.134 compute


vi /etc/hostname

   controller


hostname -F /etc/hostname

Verify from a new terminal session


[ Configure eth0 and eth1 ]

vi /etc/network/interfaces


# The loopback network interface

auto lo

iface lo inet loopback


# Host public network

auto eth0

iface eth0 inet static

      address 192.168.75.131

      netmask 255.255.255.0

      gateway 192.168.75.2

      dns-nameservers 8.8.8.8 8.8.4.4


# Host private network

auto eth1

iface eth1 inet static

      address 192.168.230.131

      netmask 255.255.255.0


service networking restart


[ Verify that the Ubuntu guest on VMware supports virtualization ]

egrep '(vmx|svm)' --color=always /proc/cpuinfo


[ Nova installation guide ]

https://github.com/mseknibilel/OpenStack-Grizzly-Install-Guide/blob/master/OpenStack_Grizzly_Install_Guide.rst


[ Nova source locations ]

nova link source = /usr/lib/python2.7/dist-packages/nova

nova original source = /usr/share/pyshared/nova


##################   Common setup on all nodes   #####################


[ Set the root password ]

sudo su -

passwd


[ repository upgrade ]

apt-get install -y ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring


echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main >> /etc/apt/sources.list.d/grizzly.list


apt-get update

apt-get upgrade

apt-get dist-upgrade


[ Install screen and vim ]

sudo apt-get install -y screen vim


[ .screenrc ]

vbell off

autodetach on

startup_message off

defscrollback 1000

attrcolor b ".I"

termcap xterm 'Co#256:AB=\E[48;5;%dm:AF=\E[38;5;%dm'

defbce "on"

#term screen-256color


## apps I want to auto-launch

#screen -t irssi irssi

#screen -t mutt mutt


## statusline, customized. (should be one-line)

hardstatus alwayslastline '%{gk}[ %{G}%H %{g}][%= %{wk}%?%-Lw%?%{=b kR}[%{W}%n%f %t%?(%u)%?%{=b kR}]%{= kw}%?%+Lw%?%?%= %{g}][%{Y}%l%{g}]%{=b C}[ %D %m/%d %C%a ]%{W}'


[ .vimrc ]

syntax on

set nocompatible

set number

set backspace=indent,eol,start

set tabstop=4

set shiftwidth=4

set autoindent

set visualbell

set laststatus=2

set statusline=%h%F%m%r%=[%l:%c(%p%%)]

set hlsearch

set background=dark

set expandtab

set tags=./tags,./TAGS,tags,TAGS,/usr/share/pyshared/nova/tags

set et

" Removes trailing spaces
function! TrimWhiteSpace()
    %s/\s\+$//e
endfunction

nnoremap <silent> <Leader>rts :call TrimWhiteSpace()<CR>
autocmd FileWritePre    * :call TrimWhiteSpace()
autocmd FileAppendPre   * :call TrimWhiteSpace()
autocmd FilterWritePre  * :call TrimWhiteSpace()
autocmd BufWritePre     * :call TrimWhiteSpace()


[ remove dmidecode ]

apt-get purge dmidecode

apt-get autoremove

kill -9 [dmidecode process]


[ As root, create the nova user and set privileges if the account does not exist ]

adduser nova


visudo

   nova     ALL=(ALL:ALL) NOPASSWD:ALL


[ Install ntp ]

apt-get install -y ntp


vi /etc/ntp.conf

#server 0.ubuntu.pool.ntp.org

#server 1.ubuntu.pool.ntp.org

#server 2.ubuntu.pool.ntp.org

#server 3.ubuntu.pool.ntp.org

server time.bora.net


service ntp restart


# Set the Korean timezone and do an initial clock sync

ntpdate -u time.bora.net

ln -sf /usr/share/zoneinfo/Asia/Seoul /etc/localtime


[ Install the mysql client ]

apt-get install -y python-mysqldb mysql-client-5.5


[ Install and verify KVM ]

apt-get install -y cpu-checker

apt-get install -y kvm libvirt-bin pm-utils

kvm-ok


# Check that the kvm modules are loaded

lsmod | grep kvm


# Load the kvm modules automatically on reboot

vi /etc/modules

   kvm

   kvm_intel


vi /etc/libvirt/qemu.conf

   cgroup_device_acl = [

   "/dev/null", "/dev/full", "/dev/zero",

   "/dev/random", "/dev/urandom",

   "/dev/ptmx", "/dev/kvm", "/dev/kqemu",

   "/dev/rtc", "/dev/hpet","/dev/net/tun"

   ]


# delete default virtual bridge

virsh net-destroy default

virsh net-undefine default


# enable live migration

vi /etc/libvirt/libvirtd.conf

   listen_tls = 0

   listen_tcp = 1

   auth_tcp = "none"


vi /etc/init/libvirt-bin.conf

   env libvirtd_opts="-d -l"


vi /etc/default/libvirt-bin

   libvirtd_opts="-d -l"


service dbus restart

service libvirt-bin restart


[ Install bridging packages ]

apt-get install -y vlan bridge-utils


[ Enable IP forwarding ]

sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf

sysctl net.ipv4.ip_forward=1
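
# To verify (should print 1):

cat /proc/sys/net/ipv4/ip_forward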


##################   Installing the Cloud Controller   #####################


[ Configure ntp ]

vi /etc/ntp.conf

   server time.bora.net

service ntp restart


[ Configure the network ]

vi /etc/network/interfaces


# The loopback network interface

auto lo

iface lo inet loopback


# Host public network

auto eth0

iface eth0 inet static

      address 192.168.75.131

      netmask 255.255.255.0

      gateway 192.168.75.2

      dns-nameservers 8.8.8.8 8.8.4.4


# Host private network

auto eth1

iface eth1 inet static

      address 192.168.230.131

      netmask 255.255.255.0


service networking restart


[ Change the hostname ]

vi /etc/hosts

192.168.230.131 controller

192.168.230.132 network

192.168.230.134 compute


vi /etc/hostname

   controller


hostname -F /etc/hostname


[ Install the mysql server ]

apt-get install -y python-mysqldb mysql-server                 # mysql root password: <password> (a placeholder used throughout this guide)

sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf

service mysql restart


[ rabbitmq server install ]

apt-get install -y rabbitmq-server


# Switch to the nova user

sudo su - nova


[ Set up the databases ]

mysql -u root -p

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' IDENTIFIED BY '<password>';


CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'controller' IDENTIFIED BY '<password>';


CREATE DATABASE nova;

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'controller' IDENTIFIED BY '<password>';


CREATE DATABASE quantum;

GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'%' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'localhost' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'controller' IDENTIFIED BY '<password>';


CREATE DATABASE cinder;

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '<password>';

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'controller' IDENTIFIED BY '<password>';


# If the GRANT statements fail

use mysql;


UPDATE user SET

Select_priv = 'Y',

Insert_priv = 'Y',

Update_priv = 'Y',

Delete_priv = 'Y',

Create_priv = 'Y',

Drop_priv = 'Y',

Reload_priv = 'Y',

Shutdown_priv = 'Y',

Process_priv = 'Y',

File_priv = 'Y',

Grant_priv = 'Y',

References_priv = 'Y',

Index_priv = 'Y',

Alter_priv = 'Y',

Show_db_priv = 'Y',

Super_priv = 'Y',

Create_tmp_table_priv = 'Y',

Lock_tables_priv = 'Y',

Execute_priv = 'Y',

Repl_slave_priv = 'Y',

Repl_client_priv = 'Y',

Create_view_priv = 'Y',

Show_view_priv = 'Y',

Create_routine_priv = 'Y',

Alter_routine_priv = 'Y',

Create_user_priv = 'Y',

Event_priv = 'Y',

Trigger_priv = 'Y',

Create_tablespace_priv = 'Y'

WHERE user IN ('keystone', 'glance', 'nova', 'quantum', 'cinder');

# Direct updates to the mysql.user table only take effect after a reload
FLUSH PRIVILEGES;


[ Install keystone ]

sudo apt-get install -y keystone

sudo service keystone status

sudo rm /var/lib/keystone/keystone.db


sudo vi /etc/keystone/keystone.conf

connection = mysql://keystone:<password>@controller/keystone

token_format = UUID


sudo service keystone restart

sudo keystone-manage db_sync


[ Configure keystone ]

vi keystone_basic.sh

#!/bin/sh

#

# Keystone basic configuration 


# Mainly inspired by https://github.com/openstack/keystone/blob/master/tools/sample_data.sh


# Modified by Bilel Msekni / Institut Telecom

#

# Support: openstack@lists.launchpad.net

# License: Apache Software License (ASL) 2.0

#

HOST_IP=192.168.230.131

ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin_pass}

SERVICE_PASSWORD=${SERVICE_PASSWORD:-service_pass}

export SERVICE_TOKEN="ADMIN"

export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"

SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}


get_id () {

    echo `$@ | awk '/ id / { print $4 }'`

}
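
# For reference, get_id scrapes the id out of the table the keystone CLI
# printed in that era; on a row like "| id | 3f9c... |" the fourth
# whitespace-separated field is the value (row layout assumed from the
# Grizzly-era client):
#   $1='|'   $2='id'   $3='|'   $4=<the id value>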


# Tenants

ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)

SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)



# Users

ADMIN_USER=$(get_id keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com)



# Roles

ADMIN_ROLE=$(get_id keystone role-create --name=admin)

KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)

KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)


# Add Roles to Users in Tenants

keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT

keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONEADMIN_ROLE --tenant-id $ADMIN_TENANT

keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONESERVICE_ROLE --tenant-id $ADMIN_TENANT


# The Member role is used by Horizon and Swift

MEMBER_ROLE=$(get_id keystone role-create --name=Member)


# Configure service users/roles

NOVA_USER=$(get_id keystone user-create --name=nova --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=nova@domain.com)

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE


GLANCE_USER=$(get_id keystone user-create --name=glance --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com)

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE


QUANTUM_USER=$(get_id keystone user-create --name=quantum --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=quantum@domain.com)

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $QUANTUM_USER --role-id $ADMIN_ROLE


CINDER_USER=$(get_id keystone user-create --name=cinder --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com)

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE


vi keystone_endpoints_basic.sh

#!/bin/sh

#

# Keystone basic Endpoints


# Mainly inspired by https://github.com/openstack/keystone/blob/master/tools/sample_data.sh


# Modified by Bilel Msekni / Institut Telecom

#

# Support: openstack@lists.launchpad.net

# License: Apache Software License (ASL) 2.0

#


# Host address

HOST_IP=192.168.230.131

EXT_HOST_IP=192.168.75.131

VOLUME_HOST_IP=192.168.230.131

VOLUME_EXT_HOST_IP=192.168.75.131

NETWORK_HOST_IP=192.168.230.132

NETWORK_EXT_HOST_IP=192.168.75.133


# MySQL definitions

MYSQL_USER=keystone

MYSQL_DATABASE=keystone

MYSQL_HOST=$HOST_IP

MYSQL_PASSWORD=<password>


# Keystone definitions

KEYSTONE_REGION=RegionOne

export SERVICE_TOKEN=ADMIN

export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"


while getopts "u:D:p:m:K:R:E:T:vh" opt; do

  case $opt in

    u)

      MYSQL_USER=$OPTARG

      ;;

    D)

      MYSQL_DATABASE=$OPTARG

      ;;

    p)

      MYSQL_PASSWORD=$OPTARG

      ;;

    m)

      MYSQL_HOST=$OPTARG

      ;;

    K)

      MASTER=$OPTARG

      ;;

    R)

      KEYSTONE_REGION=$OPTARG

      ;;

    E)

      export SERVICE_ENDPOINT=$OPTARG

      ;;

    T)

      export SERVICE_TOKEN=$OPTARG

      ;;

    v)

      set -x

      ;;

    h)

      cat <<EOF

Usage: $0 [-m mysql_hostname] [-u mysql_username] [-D mysql_database] [-p mysql_password]

       [-K keystone_master ] [ -R keystone_region ] [ -E keystone_endpoint_url ] 

       [ -T keystone_token ]

          

Add -v for verbose mode, -h to display this message.

EOF

      exit 0

      ;;

    \?)

      echo "Unknown option -$OPTARG" >&2

      exit 1

      ;;

    :)

      echo "Option -$OPTARG requires an argument" >&2

      exit 1

      ;;

  esac

done  


if [ -z "$KEYSTONE_REGION" ]; then

  echo "Keystone region not set. Please set with -R option or set KEYSTONE_REGION variable." >&2

  missing_args="true"

fi


if [ -z "$SERVICE_TOKEN" ]; then

  echo "Keystone service token not set. Please set with -T option or set SERVICE_TOKEN variable." >&2

  missing_args="true"

fi


if [ -z "$SERVICE_ENDPOINT" ]; then

  echo "Keystone service endpoint not set. Please set with -E option or set SERVICE_ENDPOINT variable." >&2

  missing_args="true"

fi


if [ -z "$MYSQL_PASSWORD" ]; then

  echo "MySQL password not set. Please set with -p option or set MYSQL_PASSWORD variable." >&2

  missing_args="true"

fi


if [ -n "$missing_args" ]; then

  exit 1

fi

 

keystone service-create --name nova --type compute --description 'OpenStack Compute Service'

keystone service-create --name cinder --type volume --description 'OpenStack Volume Service'

keystone service-create --name glance --type image --description 'OpenStack Image Service'

keystone service-create --name keystone --type identity --description 'OpenStack Identity'

keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service'

keystone service-create --name quantum --type network --description 'OpenStack Networking service'


create_endpoint () {

  case $1 in

    compute)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8774/v2/$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s' --internalurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s'

    ;;

    volume)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$VOLUME_EXT_HOST_IP"':8776/v1/$(tenant_id)s' --adminurl 'http://'"$VOLUME_HOST_IP"':8776/v1/$(tenant_id)s' --internalurl 'http://'"$VOLUME_HOST_IP"':8776/v1/$(tenant_id)s'

    ;;

    image)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':9292/v2' --adminurl 'http://'"$HOST_IP"':9292/v2' --internalurl 'http://'"$HOST_IP"':9292/v2'

    ;;

    identity)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':5000/v2.0' --adminurl 'http://'"$HOST_IP"':35357/v2.0' --internalurl 'http://'"$HOST_IP"':5000/v2.0'

    ;;

    ec2)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8773/services/Cloud' --adminurl 'http://'"$HOST_IP"':8773/services/Admin' --internalurl 'http://'"$HOST_IP"':8773/services/Cloud'

    ;;

    network)

    keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$NETWORK_EXT_HOST_IP"':9696/' --adminurl 'http://'"$NETWORK_HOST_IP"':9696/' --internalurl 'http://'"$NETWORK_HOST_IP"':9696/'

    ;;

  esac

}


for i in compute volume image identity ec2 network; do    # no object-store service was created above, so it is omitted here

  id=`mysql -h "$MYSQL_HOST" -u "$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" -ss -e "SELECT id FROM service WHERE type='"$i"';"` || exit 1

  create_endpoint $i $id

done


# Admin credentials for accessing keystone

vi creds

unset http_proxy

unset https_proxy

export OS_TENANT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=admin_pass

export OS_AUTH_URL="http://controller:5000/v2.0/"


source creds

keystone user-list


[ Install Glance ]

sudo apt-get install -y glance

sudo rm /var/lib/glance/glance.sqlite

sudo service glance-api status

sudo service glance-registry status


sudo vi /etc/glance/glance-api-paste.ini

[filter:authtoken]

paste.filter_factory = keystone.middleware.auth_token:filter_factory

delay_auth_decision = true

auth_host = 192.168.230.141

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = glance

admin_password = service_pass


sudo vi /etc/glance/glance-registry-paste.ini

[filter:authtoken]

paste.filter_factory = keystone.middleware.auth_token:filter_factory

auth_host = 192.168.230.141

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = glance

admin_password = service_pass


sudo vi /etc/glance/glance-api.conf

sql_connection = mysql://glance:<password>@192.168.230.141/glance

enable_v1_api = True

enable_v2_api = True


[paste_deploy]

flavor=keystone


sudo vi /etc/glance/glance-registry.conf

sql_connection = mysql://glance:<password>@192.168.230.141/glance


[paste_deploy]

flavor=keystone


sudo glance-manage db_sync

sudo service glance-registry restart

sudo service glance-api restart


[ Register an image ]

mkdir images

cd images

wget --no-check-certificate https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img

glance image-create --name cirros --is-public true --container-format bare --disk-format qcow2 < cirros-0.3.0-x86_64-disk.img

glance image-list


[ Install nova-api and nova-scheduler ]

sudo apt-get install -y nova-api nova-scheduler nova-cert novnc nova-consoleauth nova-novncproxy nova-doc nova-conductor


mysql -uroot -p<password> -e 'CREATE DATABASE nova;'

mysql -uroot -p<password> -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '<password>';"

mysql -uroot -p<password> -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '<password>';"


sudo vi /etc/nova/api-paste.ini

   [filter:authtoken]

   paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory

   auth_host = 192.168.230.141

   auth_port = 35357

   auth_protocol = http

   admin_tenant_name = service

   admin_user = nova

   admin_password = service_pass

   signing_dir = /tmp/keystone-signing-nova

   # Workaround for https://bugs.launchpad.net/nova/+bug/1154809

   auth_version = v2.0



sudo vi /etc/nova/nova.conf


[DEFAULT]

logdir=/var/log/nova

state_path=/var/lib/nova

lock_path=/run/lock/nova

verbose=True

api_paste_config=/etc/nova/api-paste.ini

compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler

rabbit_host=192.168.230.141

nova_url=http://192.168.230.141:8774/v1.1/

sql_connection=mysql://nova:<password>@192.168.230.141/nova

root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf


# Auth

use_deprecated_auth=false

auth_strategy=keystone


# Imaging service

glance_api_servers=192.168.230.141:9292

image_service=nova.image.glance.GlanceImageService


# Vnc configuration

novnc_enabled=true

novncproxy_base_url=http://192.168.75.141:6080/vnc_auto.html

novncproxy_port=6080

vncserver_proxyclient_address=192.168.230.141

vncserver_listen=0.0.0.0


# Network settings

network_api_class=nova.network.quantumv2.api.API

quantum_url=http://192.168.230.143:9696

quantum_auth_strategy=keystone

quantum_admin_tenant_name=service

quantum_admin_username=quantum

quantum_admin_password=service_pass

quantum_admin_auth_url=http://192.168.230.141:35357/v2.0

libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver

linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver


#Metadata

service_quantum_metadata_proxy = True

quantum_metadata_proxy_shared_secret = helloOpenStack

metadata_host = 192.168.230.141

metadata_listen = 127.0.0.1

metadata_listen_port = 8775


# Compute #

compute_driver=libvirt.LibvirtDriver


# Cinder #

volume_api_class=nova.volume.cinder.API

osapi_volume_listen_port=5900


sudo nova-manage db sync


# restart nova services

cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done


# check nova services

nova-manage service list


[ Install Horizon ]

sudo apt-get install -y openstack-dashboard memcached


# Remove the Ubuntu theme

sudo apt-get purge openstack-dashboard-ubuntu-theme


# restart apache and memcached

sudo service apache2 restart

sudo service memcached restart


# URL to open in a browser

http://192.168.75.141/horizon/


##################   Installing Cinder   #####################


[ Configure ntp ]

sudo vi /etc/ntp.conf

   server 192.168.230.141

sudo service ntp restart


[ Configure the network ]


[ Change the hostname ]


[ Install Cinder ]

sudo apt-get install -y cinder-api cinder-scheduler cinder-volume iscsitarget open-iscsi iscsitarget-dkms

sudo sed -i 's/false/true/g' /etc/default/iscsitarget

sudo vi /etc/iscsi/iscsid.conf

   node.startup = automatic

sudo service iscsitarget start

sudo service open-iscsi start


mysql -uroot -p<password> -e 'CREATE DATABASE cinder;'

mysql -uroot -p<password> -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '<password>';"

mysql -uroot -p<password> -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '<password>';"


sudo vi /etc/cinder/api-paste.ini

   [filter:authtoken]

   paste.filter_factory = keystone.middleware.auth_token:filter_factory

   service_protocol = http

   service_host = 192.168.75.141

   service_port = 5000

   auth_host = 192.168.230.141

   auth_port = 35357

   auth_protocol = http

   admin_tenant_name = service

   admin_user = cinder

   admin_password = service_pass


sudo vi /etc/cinder/cinder.conf

   [DEFAULT]

   rootwrap_config=/etc/cinder/rootwrap.conf

   sql_connection = mysql://cinder:<password>@192.168.230.141/cinder

   api_paste_config = /etc/cinder/api-paste.ini

   iscsi_helper=ietadm

   volume_name_template = volume-%s

   volume_group = cinder-volumes

   verbose = True

   auth_strategy = keystone

   rabbit_host = 192.168.230.141


sudo cinder-manage db sync


[ Create the cinder volume group ]

dd if=/dev/zero of=cinder-volumes bs=1 count=0 seek=10G

sudo losetup /dev/loop2 cinder-volumes

sudo fdisk /dev/loop2


1. sudo fdisk -l

2. sudo fdisk /dev/sdb        (generic walkthrough; for the loop file above, use /dev/loop2)

3. Press 'n' to create a new disk partition.

4. Press 'p' to make it a primary partition.

5. Press '1' to make it the first partition.

6. Either press ENTER twice to accept the default first and last cylinders, turning the rest of the disk

   into a single partition, or press ENTER once to accept the default start and then give a size

   with +size{K,M,G}, e.g. +5G or +6700M.

7. Press 't', then select the new partition.

8. Enter '8e' to change the partition type to 8e, i.e. Linux LVM.

9. Press 'p' to display the partition table. Note that the first partition shows up as /dev/sdb1 in Linux.

10. Press 'w' to write the partition table and exit fdisk.


sudo pvcreate /dev/loop2

sudo vgcreate cinder-volumes /dev/loop2


# Re-create the loop device automatically on reboot

sudo vi /etc/init.d/cinder-setup-backing-file

#!/bin/sh
losetup /dev/loop2 /home/nova/cinder-volumes

exit 0


sudo chmod 755 /etc/init.d/cinder-setup-backing-file

sudo ln -s /etc/init.d/cinder-setup-backing-file /etc/rc2.d/S10cinder-setup-backing-file
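
# Alternatively, the boot links can be created with update-rc.d (assuming
# Ubuntu's stock sysv-rc tooling; this creates links for all default
# runlevels rather than only rc2.d):

sudo update-rc.d cinder-setup-backing-file defaults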


# restart cinder services

cd /etc/init.d/; for i in $( ls cinder-* ); do sudo service $i restart; done


# verify cinder services

cd /etc/init.d/; for i in $( ls cinder-* ); do sudo service $i status; done



##################   Installing the Quantum Server   #####################


[ Configure ntp ]

sudo vi /etc/ntp.conf

   server 192.168.230.141

sudo service ntp restart


[ Configure the network ]


[ Change the hostname ]


[ Install the quantum server ]

sudo apt-get install -y quantum-server

sudo rm -rf /var/lib/quantum/ovs.sqlite


mysql -uroot -p<password> -e 'CREATE DATABASE quantum;'

mysql -uroot -p<password> -e "GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'%' IDENTIFIED BY '<password>';"

mysql -uroot -p<password> -e "GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'localhost' IDENTIFIED BY '<password>';"


sudo vi /etc/quantum/api-paste.ini

   [filter:authtoken]

   paste.filter_factory = keystone.middleware.auth_token:filter_factory

   auth_host = 192.168.230.141

   auth_port = 35357

   auth_protocol = http

   admin_tenant_name = service

   admin_user = quantum

   admin_password = service_pass


sudo vi /etc/quantum/quantum.conf

   rabbit_host = 192.168.230.141


sudo service quantum-server restart

sudo service quantum-server status


##################   Installing the Quantum Network node   #####################


[ Configure ntp ]

sudo vi /etc/ntp.conf

   server 192.168.230.141

sudo service ntp restart


[ Add eth2 as the VM public network - used as the Quantum public network ]

sudo vi /etc/network/interfaces


auto lo

iface lo inet loopback


# host public network

auto eth0

iface eth0 inet static

      address 192.168.75.144

      netmask 255.255.255.0

      gateway 192.168.75.2

      dns-nameservers 8.8.8.8 8.8.4.4


# VM private network, host private network

auto eth1

iface eth1 inet static

      address 192.168.230.144

      netmask 255.255.255.0


# VM public network

auto eth2

iface eth2 inet manual

      up ifconfig $IFACE 0.0.0.0 up

      up ip link set $IFACE promisc on

      down ip link set $IFACE promisc off

      down ifconfig $IFACE down


sudo service networking restart


[ Change the hostname ]


[ Install Open vSwitch ]

sudo apt-get install -y openvswitch-switch openvswitch-datapath-dkms


# Create the bridges

sudo ovs-vsctl add-br br-int

sudo ovs-vsctl add-br br-ex


[ Install the Quantum Open vSwitch agent, dnsmasq, DHCP agent, L3 agent, and metadata agent ]

sudo apt-get install -y quantum-plugin-openvswitch-agent dnsmasq quantum-dhcp-agent quantum-l3-agent quantum-metadata-agent


sudo vi /etc/quantum/api-paste.ini

   [filter:authtoken]

   paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory

   auth_host = 192.168.230.141

   auth_port = 35357

   auth_protocol = http

   admin_tenant_name = service

   admin_user = quantum

   admin_password = service_pass


sudo vi /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini

   [DATABASE]

   sql_connection = mysql://quantum:<password>@192.168.230.141/quantum


   [OVS]

   tenant_network_type = gre

   enable_tunneling = True

   tunnel_id_ranges = 1:1000

   integration_bridge = br-int

   tunnel_bridge = br-tun

   local_ip = 192.168.230.144


sudo vi /etc/quantum/l3_agent.ini

   # Append at the bottom of the file

   auth_url = http://192.168.230.141:35357/v2.0

   auth_region = RegionOne

   admin_tenant_name = service

   admin_user = quantum

   admin_password = service_pass


sudo vi /etc/quantum/metadata_agent.ini

   auth_url = http://192.168.230.141:35357/v2.0

   auth_region = RegionOne

   admin_tenant_name = service

   admin_user = quantum

   admin_password = service_pass


   nova_metadata_ip = 192.168.230.141

   nova_metadata_port = 8775

   metadata_proxy_shared_secret = helloOpenStack


sudo vi /etc/quantum/quantum.conf

   rabbit_host = 192.168.230.141


# restart Quantum services

cd /etc/init.d/; for i in $( ls quantum-* ); do sudo service $i restart; done


# Connect br-ex to the public network

sudo ovs-vsctl add-port br-ex eth2
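
# To double-check the bridge wiring (br-int, br-ex, and the eth2 port
# should all appear):

sudo ovs-vsctl show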



##################   Installing the Compute node   #####################


[ Configure ntp ]

sudo vi /etc/ntp.conf

   server 192.168.230.141

sudo service ntp restart


[ Configure the network ]


[ Change the hostname ]


[ Install Open vSwitch ]

sudo apt-get install -y openvswitch-switch openvswitch-datapath-dkms


# Create the bridge

sudo ovs-vsctl add-br br-int


[ Install the Quantum Open vSwitch agent ]

sudo apt-get install -y quantum-plugin-openvswitch-agent


sudo vi /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini

   [DATABASE]

   sql_connection = mysql://quantum:<password>@192.168.230.141/quantum


   [OVS]

   tenant_network_type = gre

   enable_tunneling = True

   tunnel_id_ranges = 1:1000

   integration_bridge = br-int

   tunnel_bridge = br-tun

   local_ip = 192.168.230.145


sudo vi /etc/quantum/quantum.conf

   rabbit_host = 192.168.230.141


   [keystone_authtoken]  ----> is this setting actually required?

   auth_host = 192.168.230.141

   auth_port = 35357

   auth_protocol = http

   admin_tenant_name = service

   admin_user = quantum

   admin_password = service_pass

   signing_dir = /var/lib/quantum/keystone-signing


# quantum openVSwitch agent restart

sudo service quantum-plugin-openvswitch-agent restart


[ Install Nova ]

sudo apt-get install -y nova-compute-kvm open-iscsi


sudo vi /etc/nova/api-paste.ini

   [filter:authtoken]

   paste.filter_factory = keystone.middleware.auth_token:filter_factory

   auth_host = 192.168.230.141

   auth_port = 35357

   auth_protocol = http

   admin_tenant_name = service

   admin_user = nova

   admin_password = service_pass

   signing_dir = /tmp/keystone-signing-nova

   # Workaround for https://bugs.launchpad.net/nova/+bug/1154809

   auth_version = v2.0


sudo vi /etc/nova/nova-compute.conf

   [DEFAULT]

   libvirt_type=kvm

   libvirt_ovs_bridge=br-int

   libvirt_vif_type=ethernet

   libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver

   libvirt_use_virtio_for_bridges=True


sudo vi /etc/nova/nova.conf

   [DEFAULT]

   logdir=/var/log/nova

   state_path=/var/lib/nova

   lock_path=/run/lock/nova

   verbose=True

   api_paste_config=/etc/nova/api-paste.ini

   compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler

   rabbit_host=192.168.230.141

   nova_url=http://192.168.230.141:8774/v1.1/

   sql_connection=mysql://nova:<password>@192.168.230.141/nova

   root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf


   # Auth

   use_deprecated_auth=false

   auth_strategy=keystone


   # Imaging service

   glance_api_servers=192.168.230.141:9292

   image_service=nova.image.glance.GlanceImageService


   # Vnc configuration

   novnc_enabled=true

   novncproxy_base_url=http://192.168.75.141:6080/vnc_auto.html

   novncproxy_port=6080

   # this must be the compute node's own address
   vncserver_proxyclient_address=192.168.230.145

   vncserver_listen=0.0.0.0


   # Network settings

   network_api_class=nova.network.quantumv2.api.API

   quantum_url=http://192.168.230.141:9696

   quantum_auth_strategy=keystone

   quantum_admin_tenant_name=service

   quantum_admin_username=quantum

   quantum_admin_password=service_pass

   quantum_admin_auth_url=http://192.168.230.141:35357/v2.0

   libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver

   linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

   firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver


   #Metadata

   service_quantum_metadata_proxy = True

   quantum_metadata_proxy_shared_secret = helloOpenStack

   metadata_host = 192.168.230.141

   metadata_listen = 127.0.0.1

   metadata_listen_port = 8775


   # Compute #

   compute_driver=libvirt.LibvirtDriver


   # Cinder #

   volume_api_class=nova.volume.cinder.API

   osapi_volume_listen_port=5900


# restart nova service

cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done


# nova service status

nova-manage service list



[ Running Nova commands ]

# Run with admin credentials

source creds


# Create a tenant and a user

keystone tenant-create --name myproject

keystone role-list

keystone user-create --name=myuser --pass=<password> --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 --email=myuser@domain.com

keystone user-role-add --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 --user-id 29736a14d7d4471fa50ca04da38d89b1 --role-id 022cd675521b45ffb94693e7cab07db7


# Create a network

quantum net-create --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 net_myproject

quantum net-list


# Create an internal private subnet on the network

quantum subnet-create --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 --name net_myproject_internal net_myproject 10.0.0.0/24


# Create a router

quantum router-create --tenant-id d8eca2f95bbf4ddc8bda878fe9669661 net_myproject_router


# Attach the router to the L3 agent

quantum l3-agent-router-add 829f424b-0879-4fee-a373-84c0f0bcbb9b net_myproject_router


# Attach the router to the subnet

quantum router-interface-add f3e2c02e-2146-4388-b415-c95d45f4f3a3 99189c7b-50cd-4353-9358-2dd74efbb762


# restart quantum services

cd /etc/init.d/; for i in $( ls quantum-* ); do sudo service $i restart; done


# Create a credentials file for the project

vi myproject

export OS_TENANT_NAME=myproject

export OS_USERNAME=myuser

export OS_PASSWORD=<password>

export OS_AUTH_URL="http://192.168.230.141:5000/v2.0/"


# Continue with the project credentials

source myproject


nova image-list

nova secgroup-list

nova secgroup-add-rule default tcp 22 22 0.0.0.0/0

nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0

ssh-keygen

nova keypair-add --pub_key ~/.ssh/id_rsa.pub mykey

nova keypair-list

nova flavor-list

nova boot test01 --flavor 1 --image 5c4c2339-55bd-4e9b-86cb-23694e3b9b17 --key_name mykey --security_group default


nova floating-ip-list

nova floating-ip-create

nova add-floating-ip 80eb7545-258e-4f26-a842-c1993cb03ae5 192.168.75.225

nova remove-floating-ip 80eb7545-258e-4f26-a842-c1993cb03ae5 192.168.75.225

nova floating-ip-delete 192.168.75.225


nova volume-list

nova volume-create --display_name ebs01 1

nova volume-attach 80eb7545-258e-4f26-a842-c1993cb03ae5 c209e2f1-5ff7-496c-8928-d57487d86c6f /dev/vdb

nova volume-detach 80eb7545-258e-4f26-a842-c1993cb03ae5 a078f20a-62c6-432c-8fa2-7cfd9950a64f

nova volume-delete a078f20a-62c6-432c-8fa2-7cfd9950a64f


# After logging in, format as ext4 and mount

mke2fs -t ext4 /dev/vdb

mount /dev/vdb /test



[ Access the VNC console ]

nova get-vnc-console 80eb7545-258e-4f26-a842-c1993cb03ae5 novnc






1. EBS Backup

    - A service that backs up EBS incrementally using a file-based approach instead of snapshots


2. Live Migration Boot From Volume

    - Live migration of a VM booted from an iSCSI-based root volume


3. ENI (Elastic Network Interface)

    - When licenses are tied to MAC addresses, create a pool of virtual interfaces up front and

        assign one from the pool when an instance is created later


4. Network QoS per Flavor Type

    - A service that sets an instance's network bandwidth per flavor type




[ OpenStack Contribution List ]


1. Network QoS per Flavor Type


2. API checks per task


3. Scheduler

    - How filters and weights are applied


4. EBS Backup

    - A service that backs up EBS incrementally using a file-based approach instead of snapshots


5. ENI (Elastic Network Interface)


6. Project to Host Filter Scheduling


7. Live migration of VMs booted from EBS




nova variable

vi /usr/lib/python2.7/json/encoder.py

import datetime           (add at line 4)
...
...
elif isinstance(o, datetime.datetime):   (add at line 431)
    pass
elif o.__module__.startswith('nova'):
    yield str(o)

Converting to JSON for printing:
import json
from nova.openstack.common import jsonutils  (use either json or jsonutils)
...
LOG.debug("image_service = %s", jsonutils.dumps(jsonutils.to_primitive(vars(image_service)), indent=2))
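
A standalone sketch of the same debug-printing pattern, assuming nova's jsonutils module of that era (the DummyService class below is made up for illustration):

from nova.openstack.common import jsonutils

class DummyService(object):
    def __init__(self):
        self.host = "controller"
        self.port = 9292

svc = DummyService()
# vars() yields the instance's attribute dict; to_primitive() converts it
# to JSON-safe types, and dumps() renders an indented JSON string
print(jsonutils.dumps(jsonutils.to_primitive(vars(svc)), indent=2))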


nova.api.openstack.compute.servers.py >> Controller >> create()

inst_type = {
  "memory_mb": 512,
  "root_gb": 0,
  "deleted_at": null,
  "name": "m1.tiny",
  "deleted": false,
  "created_at": null,
  "ephemeral_gb": 0,
  "updated_at": null,
  "disabled": false,
  "vcpus": 1,
  "extra_specs": {},
  "swap": 0,
  "rxtx_factor": 1.0,
  "is_public": true,
  "flavorid": "1",
  "vcpu_weight": null,
  "id": 2
}
image_href = "5c4c2339-55bd-4e9b-86cb-23694e3b9b17"
display_name = "test02"
display_description = "test02"
key_name = "mykey"
metadata = {}
access_ip_v4 = null
access_ip_v6 = null
injected_files = []
admin_password = "TbvbCd2NgA5S"
min_count = 1
max_count = 1
requested_networks = [
  [
    "0802c791-d4aa-473b-94a8-46d2b4aff91b",
    "192.168.100.5"
  ]
]
security_group = [
  "default"
]
user_data = null
availability_zone = null
config_drive = null
block_device_mapping = []
auto_disk_config = null
scheduler_hints = {}



nova.compute.api.py >> API >> _create_instance()

[ Insert a new DB row ]

create_db_entry_for_new_instance

image_service = <nova.image.glance.GlanceImageService object at 0x588c450>

image_id = "5c4c2339-55bd-4e9b-86cb-23694e3b9b17"

image = {
  "status": "active",
  "name": "tty-linux",
  "deleted": false,
  "container_format": "ami",
  "created_at": ,
  "disk_format": "ami",
  "updated_at": ,
  "id": "5c4c2339-55bd-4e9b-86cb-23694e3b9b17",
  "owner": "2ffae825c88b448bad4ef4d14f5c1204",
  "min_ram": 0,
  "checksum": "10047a119149e08fb206eea89832eee0",
  "min_disk": 0,
  "is_public": false,
  "deleted_at": null,
  "properties": {
    "kernel_id": "f14c0936-e591-4291-901f-239bc41fd3d6",
    "ramdisk_id": "cc111638-8590-4b5b-8759-f551017ea269"
  },
  "size": 25165824
}

context = {
  "project_name": "service",
  "user_id": "fa8ecb2a7110435daa10a5e9e459c7ca",
  "roles": [
    "admin",
    "member"
  ],
  "_read_deleted": "no",
  "timestamp": "2012-12-26T14:49:00.820425",
  "auth_token": "1f31ccc31d324ba88802826270772522",
  "remote_address": "192.168.75.137",
  "quota_class": null,
  "is_admin": true,
  "service_catalog": [
    {
      "endpoints_links": [],
      "endpoints": [
        {
          "adminURL": "http://192.168.75.137:8776/v1/2ffae825c88b448bad4ef4d14f5c1204/v2.0",
          "region": "RegionOne",
          "publicURL": "http://192.168.75.137:8776/v1/2ffae825c88b448bad4ef4d14f5c1204",
          "id": "82d6c5ae2899473c8aa77bd2ae99881b",
          "internalURL": "http://192.168.75.137:8776/v1/2ffae825c88b448bad4ef4d14f5c1204"
        }
      ],
      "type": "volume",
      "name": "volume"
    },
    {
      "endpoints_links": [],
      "endpoints": [
        {
          "adminURL": "http://192.168.75.137:9292/v1",
          "region": "RegionOne",
          "publicURL": "http://192.168.75.137:9292/v1",
          "id": "2e65219ddb4143b9b0a89c334a5177dc",
          "internalURL": "http://192.168.75.137:9292/v1"
        }
      ],
      "type": "image",
      "name": "glance"
    },
    {
      "endpoints_links": [],
      "endpoints": [
        {
          "adminURL": "http://192.168.75.137:8774/v2/2ffae825c88b448bad4ef4d14f5c1204",
          "region": "RegionOne",
          "publicURL": "http://192.168.75.137:8774/v2/2ffae825c88b448bad4ef4d14f5c1204",
          "id": "0e82d644a5cb47b1890f81bf67b43dec",
          "internalURL": "http://192.168.75.137:8774/v2/2ffae825c88b448bad4ef4d14f5c1204"
        }
      ],
      "type": "compute",
      "name": "nova"
    },
    {
      "endpoints_links": [],
      "endpoints": [
        {
          "adminURL": "http://192.168.75.137:35357/v2.0",
          "region": "RegionOne",
          "publicURL": "http://192.168.75.137:5000/v2.0",
          "id": "2d85bf25bb7e4e6a82efa67063d51ac1",
          "internalURL": "http://192.168.75.137:5000/v2.0"
        }
      ],
      "type": "identity",
      "name": "keystone"
    }
  ],
  "request_id": "req-bda14315-16de-4b23-8d53-24745f87fdad",
  "instance_lock_checked": false,
  "project_id": "2ffae825c88b448bad4ef4d14f5c1204",
  "user_name": "admin"
}

request_spec = {
  "block_device_mapping": [],
  "image": {
    "status": "active",
    "name": "tty-linux",
    "deleted": false,
    "container_format": "ami",
    "created_at": "2012-11-30T07:51:06.000000",
    "disk_format": "ami",
    "updated_at": "2012-11-30T07:51:07.000000",
    "properties": {
      "kernel_id": "f14c0936-e591-4291-901f-239bc41fd3d6",
      "ramdisk_id": "cc111638-8590-4b5b-8759-f551017ea269"
    },
    "min_disk": 0,
    "min_ram": 0,
    "checksum": "10047a119149e08fb206eea89832eee0",
    "owner": "2ffae825c88b448bad4ef4d14f5c1204",
    "is_public": false,
    "deleted_at": null,
    "id": "5c4c2339-55bd-4e9b-86cb-23694e3b9b17",
    "size": 25165824
  },
  "instance_type": {
    "memory_mb": 512,
    "root_gb": 0,
    "deleted_at": null,
    "name": "m1.tiny",
    "deleted": false,
    "created_at": null,
    "ephemeral_gb": 0,
    "updated_at": null,
    "disabled": false,
    "vcpus": 1,
    "extra_specs": {},
    "swap": 0,
    "rxtx_factor": 1.0,
    "is_public": true,
    "flavorid": "1",
    "vcpu_weight": null,
    "id": 2
  },
  "instance_properties": {
    "vm_state": "building",
    "availability_zone": null,
    "ramdisk_id": "cc111638-8590-4b5b-8759-f551017ea269",
    "instance_type_id": 2,
    "user_data": null,
    "vm_mode": null,
    "reservation_id": "r-sviqmkvr",
    "user_id": "fa8ecb2a7110435daa10a5e9e459c7ca",
    "display_description": "test02",
    "key_data": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDPrhT0VICqukep0Zl3lz+ZvzZOKVwBEa9IFk2rUcDnjse9zGPy9bZHorEoGYwiywOTTC+Q422rIhAJQvev7OKF4qViyndbLPrlZudeA7oFBc2I0rqUmSwrmQv1Pz4h8jrMdgelgWS1QDPgyFp3O72sS9wP0yQMZIneSdLIV2SxrxVxsISYL5GhbF/A7G9ejSRmLoZgQoDmDW+CtIHFX8EsDDC9K94Dz9F3UCMZwCGGRO4S2o+wValsAuE0xLUF8U6VJ86NrILEJYvNVXPeKyQl9Ktuow0LWqjxtnLv78R/5ayKff+bX/7cekNzG8yeTog7it4kdKaitIb+G5j+h7T nova@ubuntu\n",
    "power_state": 0,
    "progress": 0,
    "project_id": "2ffae825c88b448bad4ef4d14f5c1204",
    "config_drive": "",
    "ephemeral_gb": 0,
    "access_ip_v6": null,
    "access_ip_v4": null,
    "kernel_id": "f14c0936-e591-4291-901f-239bc41fd3d6",
    "key_name": "mykey",
    "display_name": "test02",
    "config_drive_id": "",
    "architecture": null,
    "root_gb": 0,
    "locked": false,
    "launch_time": "2012-12-26T14:42:55Z",
    "memory_mb": 512,
    "vcpus": 1,
    "image_ref": "5c4c2339-55bd-4e9b-86cb-23694e3b9b17",
    "root_device_name": null,
    "auto_disk_config": null,
    "os_type": null,
    "metadata": {}
  },
  "security_group": [
    "default"
  ],
  "instance_uuids": [
    "55c4f897-11a7-457b-9b70-c8ef28549711"
  ]
}

admin_password = "5godsYKky8AR"
injected_files = []
requested_networks = [
  [
    "0802c791-d4aa-473b-94a8-46d2b4aff91b",
    "192.168.100.5"
  ]
]
filter_properties = {
  "scheduler_hints": {}
}


nova.sheduler.filter_scheduler.py >> FilterScheduler >> schedule_run_instance()


nova.compute.manager.py >> ComputeManager >> _run_instance()

request_spec = {

  "block_device_mapping": [],

  "image": {

    "status": "active",

    "name": "tty-linux",

    "deleted": false,

    "container_format": "ami",

    "created_at": "2012-12-16T10:37:48.000000",

    "disk_format": "ami",

    "updated_at": "2012-12-16T10:37:49.000000",

    "properties": {

      "kernel_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

      "ramdisk_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78"

    },

    "min_disk": 0,

    "min_ram": 0,

    "checksum": "10047a119149e08fb206eea89832eee0",

    "owner": "0c74b5d96202433196af2faa9bff4bde",

    "is_public": false,

    "deleted_at": null,

    "id": "011a6a61-70fa-470b-a9cc-fbc7753833fb",

    "size": 25165824

  },

  "instance_type": {

    "memory_mb": 512,

    "root_gb": 0,

    "deleted_at": null,

    "name": "m1.tiny",

    "deleted": false,

    "created_at": null,

    "ephemeral_gb": 0,

    "updated_at": null,

    "disabled": false,

    "vcpus": 1,

    "extra_specs": {},

    "swap": 0,

    "rxtx_factor": 1.0,

    "is_public": true,

    "flavorid": "1",

    "vcpu_weight": null,

    "id": 2

  },

  "instance_properties": {

    "vm_state": "building",

    "availability_zone": null,

    "launch_time": "2012-12-24T16:45:50Z",

    "ramdisk_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

    "instance_type_id": 2,

    "user_data": null,

    "vm_mode": null,

    "reservation_id": "r-gzio9556",

    "user_id": "034120010ad64ecfb1eeb2ac5f16854d",

    "display_description": "test01",

    "key_data": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCiyiud+EmmdRZ50aPPbC7Ys3Td19qp6q3Xnl+W8aFHJ21IbdnCNXZo3pXpeTJy8rvFTitYxpvD5WzGlmPdXoEryJibA6hbPg6hPLINul+SwtuXlqv6pucy+eMVuWhi9MfOKv/uuJpCFIwZuEHGHg3xeW6uVyWSURW9FGH/E6tKdGrB9T2afkPaROOBnK2BRy3Bj55ExZq8qjfsYKDibwoDPddW9rR5zRn7N3pY6rhnULjyWJAd7Ll3UltKMkl3V2BZV0cyvd3c+TMtVtaa8hE9ComrxKOucd84d2+dOyUaV8hr3N3sfe/oXnvlK23Uo9TKwmYfXvTykOtAtaYRss/z nova@folsom\n",

    "power_state": 0,

    "progress": 0,

    "project_id": "0c74b5d96202433196af2faa9bff4bde",

    "config_drive": "",

    "ephemeral_gb": 0,

    "access_ip_v6": null,

    "access_ip_v4": null,

    "kernel_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

    "key_name": "mykey",

    "display_name": "test01",

    "config_drive_id": "",

    "architecture": null,

    "root_gb": 0,

    "locked": false,

    "launch_index": 0,

    "memory_mb": 512,

    "vcpus": 1,

    "image_ref": "011a6a61-70fa-470b-a9cc-fbc7753833fb",

    "root_device_name": null,

    "auto_disk_config": null,

    "os_type": null,

    "metadata": {}

  },

  "security_group": [

    "default"

  ],

  "instance_uuids": [

    "1be889ba-fe3b-4eb6-8730-157db1582f88"

  ]

}


filter_properties = {

  "config_options": {},

  "limits": {

    "memory_mb": 3000.0

  },

  "request_spec": {

    "block_device_mapping": [],

    "image": {

      "status": "active",

      "name": "tty-linux",

      "deleted": false,

      "container_format": "ami",

      "created_at": "2012-12-16T10:37:48.000000",

      "disk_format": "ami",

      "updated_at": "2012-12-16T10:37:49.000000",

      "properties": {

        "kernel_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

        "ramdisk_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78"

      },

      "min_disk": 0,

      "min_ram": 0,

      "checksum": "10047a119149e08fb206eea89832eee0",

      "owner": "0c74b5d96202433196af2faa9bff4bde",

      "is_public": false,

      "deleted_at": null,

      "id": "011a6a61-70fa-470b-a9cc-fbc7753833fb",

      "size": 25165824

    },

    "instance_type": {

      "memory_mb": 512,

      "root_gb": 0,

      "deleted_at": null,

      "name": "m1.tiny",

      "deleted": false,

      "created_at": null,

      "ephemeral_gb": 0,

      "updated_at": null,

      "disabled": false,

      "vcpus": 1,

      "extra_specs": {},

      "swap": 0,

      "rxtx_factor": 1.0,

      "is_public": true,

      "flavorid": "1",

      "vcpu_weight": null,

      "id": 2

    },

    "instance_properties": {

      "vm_state": "building",

      "availability_zone": null,

      "launch_time": "2012-12-24T16:45:50Z",

      "ramdisk_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

      "instance_type_id": 2,

      "user_data": null,

      "vm_mode": null,

      "reservation_id": "r-gzio9556",

      "user_id": "034120010ad64ecfb1eeb2ac5f16854d",

      "display_description": "test01",

      "key_data": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCiyiud+EmmdRZ50aPPbC7Ys3Td19qp6q3Xnl+W8aFHJ21IbdnCNXZo3pXpeTJy8rvFTitYxpvD5WzGlmPdXoEryJibA6hbPg6hPLINul+SwtuXlqv6pucy+eMVuWhi9MfOKv/uuJpCFIwZuEHGHg3xeW6uVyWSURW9FGH/E6tKdGrB9T2afkPaROOBnK2BRy3Bj55ExZq8qjfsYKDibwoDPddW9rR5zRn7N3pY6rhnULjyWJAd7Ll3UltKMkl3V2BZV0cyvd3c+TMtVtaa8hE9ComrxKOucd84d2+dOyUaV8hr3N3sfe/oXnvlK23Uo9TKwmYfXvTykOtAtaYRss/z nova@folsom\n",

      "power_state": 0,

      "progress": 0,

      "project_id": "0c74b5d96202433196af2faa9bff4bde",

      "config_drive": "",

      "ephemeral_gb": 0,

      "access_ip_v6": null,

      "access_ip_v4": null,

      "kernel_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

      "key_name": "mykey",

      "display_name": "test01",

      "config_drive_id": "",

      "architecture": null,

      "root_gb": 0,

      "locked": false,

      "launch_index": 0,

      "memory_mb": 512,

      "vcpus": 1,

      "image_ref": "011a6a61-70fa-470b-a9cc-fbc7753833fb",

      "root_device_name": null,

      "auto_disk_config": null,

      "os_type": null,

      "metadata": {}

    },

    "security_group": [

      "default"

    ],

    "instance_uuids": [

      "1be889ba-fe3b-4eb6-8730-157db1582f88"

    ]

  },

  "instance_type": {

    "memory_mb": 512,

    "root_gb": 0,

    "deleted_at": null,

    "name": "m1.tiny",

    "deleted": false,

    "created_at": null,

    "ephemeral_gb": 0,

    "updated_at": null,

    "disabled": false,

    "vcpus": 1,

    "extra_specs": {},

    "swap": 0,

    "rxtx_factor": 1.0,

    "is_public": true,

    "flavorid": "1",

    "vcpu_weight": null,

    "id": 2

  },

  "retry": {

    "num_attempts": 1,

    "hosts": [

      "folsom"

    ]

  },

  "scheduler_hints": {}

}


requested_networks[

  [

    "0802c791-d4aa-473b-94a8-46d2b4aff91b",

    "192.168.100.5"

  ]

]

injected_files = []

admin_password = "6Ty7wZA9wc5w"

is_first_time = true


instance = {

  "vm_state": "building",

  "availability_zone": null,

  "terminated_at": null,

  "ephemeral_gb": 0,

  "instance_type_id": 2,

  "user_data": null,

  "vm_mode": null,

  "deleted_at": null,

  "reservation_id": "r-gzio9556",

  "id": 4,

  "security_groups": [

    {

      "project_id": "0c74b5d96202433196af2faa9bff4bde",

      "user_id": "034120010ad64ecfb1eeb2ac5f16854d",

      "name": "default",

      "deleted": false,

      "created_at": "2012-12-16T11:47:01.000000",

      "updated_at": null,

      "rules": [

        {

          "from_port": 22,

          "protocol": "tcp",

          "deleted": false,

          "created_at": "2012-12-16T11:47:26.000000",

          "updated_at": null,

          "id": 1,

          "to_port": 22,

          "parent_group_id": 1,

          "cidr": "0.0.0.0/0",

          "deleted_at": null,

          "group_id": null

        },

        {

          "from_port": -1,

          "protocol": "icmp",

          "deleted": false,

          "created_at": "2012-12-16T11:47:41.000000",

          "updated_at": null,

          "id": 2,

          "to_port": -1,

          "parent_group_id": 1,

          "cidr": "0.0.0.0/0",

          "deleted_at": null,

          "group_id": null

        }

      ],

      "deleted_at": null,

      "id": 1,

      "description": "default"

    }

  ],

  "disable_terminate": false,

  "root_device_name": null,

  "user_id": "034120010ad64ecfb1eeb2ac5f16854d",

  "uuid": "1be889ba-fe3b-4eb6-8730-157db1582f88",

  "server_name": null,

  "default_swap_device": null,

  "info_cache": {

    "instance_uuid": "1be889ba-fe3b-4eb6-8730-157db1582f88",

    "deleted": false,

    "created_at": "2012-12-24T16:45:50.000000",

    "updated_at": null,

    "network_info": "[]",

    "deleted_at": null,

    "id": 4

  },

  "hostname": "test01",

  "launched_on": null,

  "display_description": "test01",

  "key_data": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCiyiud+EmmdRZ50aPPbC7Ys3Td19qp6q3Xnl+W8aFHJ21IbdnCNXZo3pXpeTJy8rvFTitYxpvD5WzGlmPdXoEryJibA6hbPg6hPLINul+SwtuXlqv6pucy+eMVuWhi9MfOKv/uuJpCFIwZuEHGHg3xeW6uVyWSURW9FGH/E6tKdGrB9T2afkPaROOBnK2BRy3Bj55ExZq8qjfsYKDibwoDPddW9rR5zRn7N3pY6rhnULjyWJAd7Ll3UltKMkl3V2BZV0cyvd3c+TMtVtaa8hE9ComrxKOucd84d2+dOyUaV8hr3N3sfe/oXnvlK23Uo9TKwmYfXvTykOtAtaYRss/z nova@folsom\n",

  "deleted": false,

  "scheduled_at": "2012-12-24T16:45:50.413093",

  "power_state": 0,

  "default_ephemeral_device": null,

  "progress": 0,

  "project_id": "0c74b5d96202433196af2faa9bff4bde",

  "launched_at": null,

  "config_drive": "",

  "ramdisk_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

  "access_ip_v6": null,

  "access_ip_v4": null,

  "kernel_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

  "key_name": "mykey",

  "updated_at": "2012-12-24T16:45:50.441013",

  "host": null,

  "display_name": "test01",

  "task_state": "scheduling",

  "shutdown_terminate": false,

  "root_gb": 0,

  "locked": false,

  "name": "instance-00000004",

  "created_at": "2012-12-24T16:45:50.000000",

  "launch_index": 0,

  "memory_mb": 512,

  "instance_type": {

    "memory_mb": 512,

    "root_gb": 0,

    "name": "m1.tiny",

    "deleted": false,

    "created_at": null,

    "ephemeral_gb": 0,

    "updated_at": null,

    "disabled": false,

    "vcpus": 1,

    "flavorid": "1",

    "swap": 0,

    "rxtx_factor": 1.0,

    "is_public": true,

    "deleted_at": null,

    "vcpu_weight": null,

    "id": 2

  },

  "vcpus": 1,

  "image_ref": "011a6a61-70fa-470b-a9cc-fbc7753833fb",

  "architecture": null,

  "auto_disk_config": null,

  "os_type": null,

  "metadata": []

}


image_meta = {

  "status": "active",

  "name": "tty-linux",

  "deleted": false,

  "container_format": "ami",

  "created_at": "2012-12-16T10:37:48.000000",

  "disk_format": "ami",

  "updated_at": "2012-12-16T10:37:49.000000",

  "properties": {

    "kernel_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78",

    "ramdisk_id": "619a49c6-e653-4ca2-93f0-2e0e8cb50e78"

  },

  "min_disk": 0,

  "min_ram": 0,

  "checksum": "10047a119149e08fb206eea89832eee0",

  "owner": "0c74b5d96202433196af2faa9bff4bde",

  "is_public": false,

  "deleted_at": null,

  "id": "011a6a61-70fa-470b-a9cc-fbc7753833fb",

  "size": 25165824

}


network_info = [

  {

    "network": {

      "bridge": "br100",

      "subnets": [

        {    

          "ips": [

            {    

              "meta": {},

              "version": 4,

              "type": "fixed",

              "floating_ips": [],

              "address": "192.168.100.2"

            }    

          ],   

          "version": 4,

          "meta": {

            "dhcp_server": "192.168.100.1"

          },   

          "dns": [

            {    

              "meta": {},

              "version": 4,

              "type": "dns",

              "address": "8.8.8.8"

            }    

          ],   

          "routes": [],

          "cidr": "192.168.100.0/24",

          "gateway": {

            "meta": {},

            "version": 4,

            "type": "gateway",

            "address": "192.168.100.1"

          }    

        },   

        {    

          "ips": [],

          "version": null,

          "meta": {

            "dhcp_server": null

          },   

          "dns": [],

          "routes": [],

          "cidr": null,

          "gateway": {

            "meta": {},

            "version": null,

            "type": "gateway",

            "address": null

          }    

        } 

      ],

      "meta": {

        "tenant_id": null,

        "should_create_bridge": true,

        "bridge_interface": "br100"

      },

      "id": "da8b8d70-6522-495a-b9f7-9bfadb931a8f",

      "label": "private"

    },

    "meta": {},

    "id": "fe9cd80f-c807-4869-9933-cafce241ac0e",

    "address": "fa:16:3e:31:f5:00"

  }

]


block_device_info = {

  "block_device_mapping": [],

  "root_device_name": null,

  "ephemerals": [],

  "swap": null

}


injected_files = []


nova.compute.manager.py >> ComputeManager >> _allocate_network()


vm_states = BUILDING

task_states = NETWORKING

expected_task_states = None



    nova.network.api.py >> API >> allocate_for_instance()


    nova.network.manager.py >> NetworkManager >> allocate_for_instance()


    nova.network.manager.py >> NetworkManager >> _allocate_mac_address()


    nova.network.manager.py >> RPCAllocateFixedIP >> _allocate_fixed_ips()


    nova.network.manager.py >> NetworkManager >> get_instance_nw_info()



nova.compute.manager.py >> ComputeManager >> _prep_block_device()


vm_states = BUILDING

task_states = BLOCK_DEVICE_MAPPING


nova.compute.manager.py >> ComputeManager >> _spawn()


[ When VM creation starts ]

vm_states = BUILDING

task_states = SPAWNING

expected_task_states = BLOCK_DEVICE_MAPPING


[ After creation finishes ]

power_state = current_power_state

vm_state = ACTIVE

task_state = None

expected_task_states = SPAWNING


nova.virt.libvirt.driver.py >> LibvirtDriver >> spawn()

1. The RabbitMQ flow looks like this:


Producer -> Exchange -> Queue  -> Consumer

                     -> Queue


2. Exchange types (see the sketch below)

    - Direct exchange   : delivers only to the consumer whose binding key matches the routing_key

    - Fan-out exchange  : delivers to all bound consumers

    - Topic exchange    : delivers to consumers matched using the * and # wildcards

                                   *.stock.#   ->  us.stock (O),   eur.stock.db (O),   stock.nasdaq (X)
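
For illustration, a minimal topic-exchange sketch in Python using the pika client (the broker address, exchange, queue, and routing-key names are all made up):

import pika

# connect to a RabbitMQ broker on localhost
conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
ch = conn.channel()

# a topic exchange routes on wildcard patterns: * matches exactly one
# word, # matches zero or more words
ch.exchange_declare(exchange="stocks", exchange_type="topic")
ch.queue_declare(queue="stock_feed")
ch.queue_bind(queue="stock_feed", exchange="stocks", routing_key="*.stock.#")

# "us.stock" matches *.stock.# (O); "stock.nasdaq" would not (X)
ch.basic_publish(exchange="stocks", routing_key="us.stock", body="NYSE tick")
conn.close()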








1. Install path when running python setup.py install on a Mac

/Library/Python/2.7/site-packages


2. Build a source package

python setup.py sdist


3. Install from source

python setup.py install --record files.txt


4. Install the source tree with pip

pip install -e .


5. Uninstall a source install

cat files.txt | xargs rm -rf


6. Install required packages with pip

pip install -r pip-requires


7. List installed pip packages

pip freeze


8. Uninstall a pip package

pip uninstall package-name

1. 3 different Cloud Service Models

     - IaaS(Infrastructure as a Service)

       PaaS (Platform as a Service)

       SaaS (Software as a Service)


2. 3 different Cloud Delivery Models

     - Private Cloud, Public Cloud, Hybrid Cloud


3. 5 primary characteristics of Cloud computing

    - On-demand self-service

    - Ubiquitous network access

    - Resource pooling

    - Dynamic and Elastic resource allocation

    - Measured service / Pay-per-use


4. 2 primary characteristics/elements of the Cloud Value Proposition

    - IT operations Efficiency, Business innovation



I am posting this to the blog because a stiff formal report could not properly convey what I saw and felt.


Conferences where many companies gather, not just the OpenStack Design Summit, are truly important.


If you attend a conference on a subject you know nothing about, you will probably take away less than a third of it.

But if you are already building a product with the technology, you can find answers to some of your problems.


1. A showcase of new technologies and products

    - The main discussions at this conference were about overturning traditional networking concepts to fit the cloud.

      The system we have built also has a network scale-out problem.

      Honestly, nobody in Korea could give us an answer, and everyone was trapped inside the frame of their own existing technology.

      Our system is fine for the time being; even if customers grow quickly, we can scale to a degree,

      even if that is not a fundamental solution.


      Many vendors who have wrestled with these problems come here with products that embody their claims.

      They are not perfect, but they can serve to a reasonable degree.

      And to sell those products, they have to convince engineers with logic; of course, if the persuasion fails,

      the product becomes useless.


      What matters, though, are the technologies and concepts that can solve the problem.

      With the hints gained here, I believe we can build a fair amount of it ourselves.

      A picture that solves half of the problem has been drawn.


      Also, looking at their products, I could turn requirements that at first seemed impossible to implement

      into feasible architectures.

      Auto Scaling and Hybrid Bursting are such cases.


2. Discussions on upcoming development topics and methods

     - OpenStack's main development topics and methods are decided at the Design Summit.

        What will be developed seems largely settled in advance, but it was impressive that the developers

        discuss and decide how to proceed. Like agile, they discuss with drawings instead of design documents

        and move straight to execution, even online. This seems to be what makes the open-source community work.

        Some remarks get brushed aside in the discussions, but still, everything is out in the open.


        The catch is that future active participation requires developers to discuss face to face and get to know each other.

        Developers take a deep interest in each other once you have shown your skills.

        In that sense, as many developers as possible should attend the Design Summit,

        and then participate actively in development along that direction.

        If you only passively consume the source, you can never be more than an outsider.


        The problem is that I was the only developer who came.

        If I relay all this second-hand, the impact will fade, and the synergy will be smaller.

        And the developers I met here will remember only me.

        Given how big this market is, and how much the company invests, not sending a few more developers

        is nothing short of penny wise and pound foolish.


3. An objective assessment of our technical level through other companies

      - Through the many companies adopting the same technology, we could assess our skills objectively.

         Some companies asked about problems we had already solved, and conversely, some brought solutions

         to the problems we had been wrestling with.

         You can also tell from the questions how deeply each side has thought things through.

         My estimate of our level: upper-middle?


Finally, here in Silicon Valley there are no cloud or big-data people left on the market.

Google, Facebook, HP, DELL, Cisco, IBM, and the other majors have taken them all.

Introduce yourself well on LinkedIn, and you will get a call from Silicon Valley right away. ^^





The 2012 OpenStack Folsom Design Summit was held in San Francisco.

For someone building a cloud system, it was a chance to gauge our current position and direction on site.


The main topic of this Design Summit was Quantum, which covers software-based virtual networking.

The fact that Quantum sessions ran almost two full days shows how important it is.


If you have built a cloud system using Flat Network or VLAN mode, you already feel in your bones why Quantum is needed (at least I do).


The other topic was Hybrid Cloud.

Notably, the outline of a baseline architecture emerged for Hybrid Cloud Bursting, which had seemed almost impossible to build.

Meeting the various companies here is how you pick up the hints.


Hybrid Cloud has not been formally discussed yet, but it will probably come to the fore at the Design Summit in the second half of this year.

If it is not adopted, I will have to push for it hard myself. ^^


The Design Summit will help companies just starting a cloud build find their direction, and I believe companies that attend with some knowledge already in hand will take away even more.



If you want to use threads with the eventlet module in Python, there are three options (see the sketch after this list).


1. Use eventlet.tpool

    - The simplest approach: run work on a thread with eventlet.tpool.execute(function, ...).

      A pool is used, with 20 threads by default.

      To grow the pool, change the EVENTLET_THREADPOOL_SIZE environment variable.


2. Use eventlet directly

    - Spawn a thread with eventlet.spawn(function, ...).

      An eventlet.greenthread.GreenThread is returned.

      No pool can be used.


3. Use eventlet.greenpool

    - Spawn a thread with eventlet.greenpool.GreenPool().spawn(function, ...).

      An eventlet.greenthread.GreenThread is returned.

      A pool can be used.
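
A minimal sketch of all three approaches (the work function is made up for illustration):

import eventlet
from eventlet import tpool

def work(x):
    return x * 2

# 1. tpool: runs on an OS-thread pool (20 threads by default,
#    resized via the EVENTLET_THREADPOOL_SIZE environment variable)
print(tpool.execute(work, 10))

# 2. eventlet.spawn: returns a GreenThread; no pooling
gt = eventlet.spawn(work, 20)
print(gt.wait())

# 3. GreenPool: green threads drawn from a bounded pool
pool = eventlet.GreenPool(size=100)
print(pool.spawn(work, 30).wait())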


