K8S hello world


 

My test environment is Red Hat 7.1 + K8S all-in-one, installed from binaries.

 

Component                                                        IP
etcd; kube-apiserver; kube-controller-manager; kube-scheduler    9.21.62.200
kube-proxy; kubelet                                              9.21.62.200

 

 

1. Install Docker

https://get.docker.com/rpm/1.7.1/centos-7/RPMS/x86_64/docker-engine-1.7.1-1.el7.centos.x86_64.rpm
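A minimal install sketch, assuming yum and systemd on RHEL 7 (these commands are illustrative, not from the original post; adjust the path to wherever you saved the RPM):

curl -O https://get.docker.com/rpm/1.7.1/centos-7/RPMS/x86_64/docker-engine-1.7.1-1.el7.centos.x86_64.rpm
yum localinstall -y docker-engine-1.7.1-1.el7.centos.x86_64.rpm   # install the local RPM with yum
systemctl start docker                                            # RHEL 7 uses systemd
docker version                                                    # verify client and daemon both respond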

2. Install etcd

https://github.com/coreos/etcd/releases/download/v2.0.11/etcd-v2.0.11-linux-amd64.tar.gz
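A hedged sketch of unpacking and starting a single-node etcd with its defaults; the --etcd_servers flag used later assumes the client port 4001:

tar xzf etcd-v2.0.11-linux-amd64.tar.gz
cd etcd-v2.0.11-linux-amd64
./etcd &                                  # single member, default listen addresses
curl -L http://127.0.0.1:4001/version     # sanity check: prints the etcd version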

3. Install K8S

   1) Download Kubernetes from https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v0.16.1/kubernetes.tar.gz

   2) The install tree (contents of /root/k8s after downloading everything)

 [root@xlhu2 bin]# ll /root/k8s/
total 101360
-rw-r--r-- 1 root root   4735688 Jul 14  2015 docker-engine-1.7.1-1.el7.centos.x86_64.rpm
drwxr-xr-x 3  501 games      117 Jan 21 02:53 etcd-v0.4.6-linux-amd64
-rw-r--r-- 1 root root   6022487 Oct 29  2014 etcd-v0.4.6-linux-amd64.tar.gz
drwxr-xr-x 7 root wheel      130 Jan 21 03:05 kubernetes
-rw-r--r-- 1 root root  93016034 May  1  2015 kubernetes.tar.gz
-rw-r--r-- 1 root root       714 Jan 22 03:00 replicationcontroller.json
-rw-r--r-- 1 root root       409 Jan 22 02:42 test-pod.json
-rw-r--r-- 1 root root       175 Jan 22 03:14 test-svc.json
[root@xlhu2 bin]#
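The kubernetes/server/kubernetes/server/bin path used below is what you get after unpacking the server tarball that ships inside kubernetes.tar.gz; a hedged sketch (the inner tarball name is assumed from that release layout):

cd /root/k8s
tar xzf kubernetes.tar.gz
cd kubernetes/server
tar xzf kubernetes-server-linux-amd64.tar.gz      # unpacks kubernetes/server/bin with the kube-* binaries and kubectl
cd kubernetes/server/bin && ls kube*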

      3) Run kube-apiserver

   

./kube-apiserver --address=0.0.0.0  --insecure-port=8080 --portal_net="127.0.0.0/16" --log_dir=/var/log/kube  --kubelet_port=10250 --v=0  --logtostderr=false --etcd_servers=http://127.0.0.1:4001 --allow_privileged=false
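A quick, hedged way to confirm the apiserver came up (both endpoints existed in this API generation):

curl http://127.0.0.1:8080/healthz     # should return "ok"
curl http://127.0.0.1:8080/api         # lists the supported API versions, e.g. v1beta1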
 

 

    4) Run kube-controller-manager

 

./kube-controller-manager  --v=0 --logtostderr=false --log_dir=/var/log/kube  --master=127.0.0.1:8080 --machines=127.0.0.1
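In this old release the --machines flag is what registers the node list; a hedged check against the v1beta1 REST path (the resource was still called "minions" back then):

curl http://127.0.0.1:8080/api/v1beta1/minions     # should list 127.0.0.1 once the sync loop has run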
 

 

    5) Run kube-scheduler

   

./kube-scheduler  --master=127.0.0.1:8080  --v=0  --log_dir=/var/log/kube
  

 

   6) Run kube-proxy

  

./kube-proxy  --logtostderr=false  --v=0  --master=http://127.0.0.1:8080 
   

 

   7) Run kubelet

   

./kubelet  --logtostderr=false  --v=0  --allow-privileged=false   --log_dir=/var/log/kube  --address=127.0.0.1  --port=10250  --hostname_override=127.0.0.1   --api_servers=http://127.0.0.1:8080
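All of the daemons above run in the foreground, so start each one in its own shell (or background it yourself). A hedged sanity check from another terminal:

ps -ef | grep -E 'kube-apiserver|kube-controller|kube-scheduler|kube-proxy|kubelet' | grep -v grep
netstat -lntp | grep -E ':8080|:10250'      # apiserver (8080) and kubelet (10250) are listening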
    8) Create a pod

 

      a. create json file
{
  "id": "fedoraapache",
  "kind": "Pod",
  "apiVersion": "v1beta1",
  "desiredState": {
    "manifest": {
      "version": "v1beta1",
      "id": "fedoraapache",
      "containers": [{
        "name": "fedoraapache",
        "image": "fedora/apache",
        "ports": [{
          "containerPort": 80,
          "hostPort": 8080
        }]
      }]
    }
  },
  "labels": {
    "name": "fedoraapache"
  }
}
    b.  create pod
./kubectl create -f test-pod.json
    c. check result
   
[root@xlhu2 bin]# cd /root/k8s/kubernetes/server/kubernetes/server/bin
[root@xlhu2 bin]# ./kubectl get pods
POD            IP            CONTAINER(S)   IMAGE(S)        HOST                  LABELS              STATUS    CREATED   MESSAGE
fedoraapache   172.17.0.39                                  127.0.0.1/127.0.0.1   name=fedoraapache   Running   2 hours   
                             fedoraapache   fedora/apache                                             Running   2 hours   
[root@xlhu2 bin]# docker ps
CONTAINER ID        IMAGE                                  COMMAND             CREATED             STATUS              PORTS                  NAMES
7dec2cb57b83        fedora/apache                          "/run-apache.sh"    2 hours ago         Up 2 hours                                 k8s_fedoraapache.a1850cda_fedoraapache_default_c1f162b6-c29a-11e5-a9f3-525400a5a3b1_b1512d92   
2b452bfc0bab        gcr.io/google_containers/pause:0.8.0   "/pause"            2 hours ago         Up 2 hours          0.0.0.0:8090->80/tcp   k8s_POD.f60e046f_fedoraapache_default_c1f162b6-c29a-11e5-a9f3-525400a5a3b1_9aa22d2f            
[root@xlhu2 bin]# curl localhost:8090
Apache
[root@xlhu2 bin]# 
   
   9) Create a ReplicationController
 The Replication Controller is one of the most useful features in Kubernetes: it keeps a specified number of Pod replicas running. An application usually needs several Pods behind it, and the Replication Controller guarantees the replica count; even if the host a replica was scheduled onto fails, the controller starts the same number of Pods on other hosts. Replicas can be created from a pod template in the replication controller, or by copying an already-running Pod, and in either case they are associated through a label selector.
    a. create json file
   
{
    "id": "lianjiatest.com",
    "apiVersion": "v1beta1",
    "kind": "ReplicationController",
    "desiredState": {
      "replicas": 5,
      "replicaSelector": {"name": "liutest"},
      "podTemplate": {
        "desiredState": {
           "manifest": {
             "version": "v1beta1",
             "id": "apacheserver",
             "containers": [{
               "name": "apachetest",
               "image": "fedora/apache",
               "imagePullPolicy": "PullIfNotPresent",
               "ports": [{
                   "containerPort": 80
               }]
             }]
           }
         },
         "labels": {"name": "liutest"}
        }},
    "labels": {"name": "replicationtest"}
  }
      b. Create the pod with 5 replicas
     
./kubectl create -f replicationcontroller.json
 
    c. check result
    
[root@xlhu2 bin]# ./kubectl create -f /root/k8s/replicationcontroller.json 
replicationControllers/lianjiatest.com
[root@xlhu2 bin]# ./kubectl get pods
POD                     IP            CONTAINER(S)   IMAGE(S)        HOST                  LABELS              STATUS    CREATED     MESSAGE
fedoraapache            172.17.0.39                                  127.0.0.1/127.0.0.1   name=fedoraapache   Running   2 hours     
                                      fedoraapache   fedora/apache                                             Running   2 hours     
lianjiatest.com-0suix                                                127.0.0.1/            name=liutest        Pending   6 seconds   
                                      apachetest     fedora/apache                                                       
lianjiatest.com-2k5pl   172.17.0.40                                  127.0.0.1/127.0.0.1   name=liutest        Running   6 seconds   
                                      apachetest     fedora/apache                                             Running   4 seconds   
lianjiatest.com-otn5w   172.17.0.43                                  127.0.0.1/127.0.0.1   name=liutest        Running   6 seconds   
                                      apachetest     fedora/apache                                             Running   2 seconds   
lianjiatest.com-p4nim   172.17.0.42                                  127.0.0.1/127.0.0.1   name=liutest        Running   6 seconds   
                                      apachetest     fedora/apache                                             Running   3 seconds   
lianjiatest.com-t7hn1   172.17.0.41                                  127.0.0.1/127.0.0.1   name=liutest        Running   6 seconds   
                                      apachetest     fedora/apache                                             Running   3 seconds   
[root@xlhu2 bin]# docker ps
CONTAINER ID        IMAGE                                  COMMAND             CREATED             STATUS              PORTS                  NAMES
2abbc1781b99        fedora/apache                          "/run-apache.sh"    11 seconds ago      Up 8 seconds                               k8s_apachetest.75f50b88_lianjiatest.com-0suix_default_e4ddc3de-c2ac-11e5-b1dd-525400a5a3b1_0771d52c   
4ab5778a9ad6        fedora/apache                          "/run-apache.sh"    11 seconds ago      Up 8 seconds                               k8s_apachetest.75f50b88_lianjiatest.com-otn5w_default_e4dd3625-c2ac-11e5-b1dd-525400a5a3b1_b2d65e5d   
5ad5b3b60d38        fedora/apache                          "/run-apache.sh"    11 seconds ago      Up 9 seconds                               k8s_apachetest.75f50b88_lianjiatest.com-p4nim_default_e4ddf207-c2ac-11e5-b1dd-525400a5a3b1_9ad86417   
ab616eacacf4        fedora/apache                          "/run-apache.sh"    12 seconds ago      Up 9 seconds                               k8s_apachetest.75f50b88_lianjiatest.com-t7hn1_default_e4ddfce7-c2ac-11e5-b1dd-525400a5a3b1_3568fa44   
a9be9c705726        fedora/apache                          "/run-apache.sh"    12 seconds ago      Up 10 seconds                              k8s_apachetest.75f50b88_lianjiatest.com-2k5pl_default_e4dd545e-c2ac-11e5-b1dd-525400a5a3b1_6140f4dc   
99c857266bd6        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 10 seconds                              k8s_POD.d41d03ce_lianjiatest.com-0suix_default_e4ddc3de-c2ac-11e5-b1dd-525400a5a3b1_265b8238          
8a529706a844        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 10 seconds                              k8s_POD.d41d03ce_lianjiatest.com-otn5w_default_e4dd3625-c2ac-11e5-b1dd-525400a5a3b1_653ca41d          
5dea06978306        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 11 seconds                              k8s_POD.d41d03ce_lianjiatest.com-p4nim_default_e4ddf207-c2ac-11e5-b1dd-525400a5a3b1_8e2ec53c          
20dab1b797db        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 11 seconds                              k8s_POD.d41d03ce_lianjiatest.com-t7hn1_default_e4ddfce7-c2ac-11e5-b1dd-525400a5a3b1_17e70e3b          
b32e94be7ac4        gcr.io/google_containers/pause:0.8.0   "/pause"            13 seconds ago      Up 11 seconds                              k8s_POD.d41d03ce_lianjiatest.com-2k5pl_default_e4dd545e-c2ac-11e5-b1dd-525400a5a3b1_64468c87          
7dec2cb57b83        fedora/apache                          "/run-apache.sh"    2 hours ago         Up 2 hours                                 k8s_fedoraapache.a1850cda_fedoraapache_default_c1f162b6-c29a-11e5-a9f3-525400a5a3b1_b1512d92          
2b452bfc0bab        gcr.io/google_containers/pause:0.8.0   "/pause"            2 hours ago         Up 2 hours          0.0.0.0:8090->80/tcp   k8s_POD.f60e046f_fedoraapache_default_c1f162b6-c29a-11e5-a9f3-525400a5a3b1_9aa22d2f                   
[root@xlhu2 bin]# 
    OK! Even if you delete a few of these pods on the node, the controller quickly brings the count back up to 5.
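A hedged way to watch that self-healing happen (the pod name is taken from the listing above; the delete syntax is from this kubectl generation):

./kubectl delete pod lianjiatest.com-2k5pl      # remove one replica by hand
./kubectl get pods                              # a replacement lianjiatest.com-xxxxx pod appears and the count returns to 5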
    10) Create a service
        A Service is the outermost unit in Kubernetes: it exposes a virtual IP and service port through which the Pods we defined can be reached. In the current version the forwarding is implemented with iptables NAT rules, whose target is a random port opened by kube-proxy.
     a. create json file
    
{
  "id": "webserver",
  "kind": "Service",
  "apiVersion": "v1beta1",
  "selector": {
    "name": "liutest"
  },
  "protocol": "TCP",
  "containerPort": 80,
  "port": 8080
}
       b. create service and check result
   
[root@xlhu2 bin]# ./kubectl create -f /root/k8s/test-svc.json
services/webserver
[root@xlhu2 bin]# ./kubectl get svc
NAME            LABELS                                    SELECTOR       IP             PORT(S)
kubernetes      component=apiserver,provider=kubernetes   <none>         127.0.0.2      443/TCP
kubernetes-ro   component=apiserver,provider=kubernetes   <none>         127.0.0.1      80/TCP
webserver       <none>                                    name=liutest   127.0.33.201   8080/TCP
[root@xlhu2 bin]# curl 127.0.33.201:8080
Apache
[root@xlhu2 bin]#
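Since the forwarding is done with iptables NAT rules, a hedged way to see what kube-proxy installed for the new portal IP (the chain names are from the userspace proxy of this era):

iptables -t nat -L -n | grep -i kube      # expect KUBE-PORTALS rules covering 127.0.33.201:8080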
 
   Finally, one point worth noting:
In replicationcontroller.json, "replicaSelector": {"name": "XXXXXX"} must match the pod template's "labels": {"name": "XXXXXXX"} as well as the "selector": {"name": "XXXXXXX"} in the service definition.
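A quick, hedged grep to eyeball that the three labels line up (file names as used in this post):

grep -n '"name"' /root/k8s/replicationcontroller.json /root/k8s/test-svc.json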
 
Reference:
  http://segmentfault.com/a/119000000288679
 
 
 