K8sInLXD

Building the image

Convert the qcow2 image to raw, map its partitions, activate the LVM volumes, and pack the root filesystem:

# qemu-img convert -O raw a.qcow2 a.img
# kpartx -av a.img
# vgscan
# lvscan
# vgchange -ay
# mount /dev/vg-root/xougowueg /mnt
# tar -cvzf rootfs.tar.gz -C /mnt .
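
Once the tarball is created, the mount and the device mappings can be torn down again (a sketch, assuming the kpartx/LVM steps above):

# umount /mnt
# vgchange -an
# kpartx -dv a.img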

Create metadata.tar.gz and import the image:

# vim metadata.yaml
architecture: "aarch64"
creation_date: 1592803465
properties:
  architecture: "aarch64"
  description: "Rong-node"
  os: "ubuntu"
  release: "focal"
# tar czvf metadata.tar.gz metadata.yaml
# lxc image import metadata.tar.gz rootfs.tar.gz --alias "gowuogu"
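
To confirm the import succeeded, the new alias should now show up in the local image store:

# lxc image info gowuogu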

k8s in lxc

Check whether the profile has already been created on the host:

# lxc profile list
+---------+---------+
|  NAME   | USED BY |
+---------+---------+
| default | 0       |
+---------+---------+
| ourk8s  | 4       |
+---------+---------+

If it does not exist, create it:

# lxc profile create ourk8s
# cat > kubernetes.profile <<EOF
config:
  linux.kernel_modules: ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
  raw.lxc: "lxc.apparmor.profile=unconfined\nlxc.cap.drop= \nlxc.cgroup.devices.allow=a\nlxc.mount.auto=proc:rw sys:rw"
  security.nesting: "true"
  security.privileged: "true"
description: Kubernetes LXD profile
devices:
  eth0:
    name: eth0
    nictype: bridged
    parent: lxdbr0
    type: nic
  root:
    path: /
    pool: default
    type: disk
name: ourk8s
EOF

# lxc profile edit ourk8s < kubernetes.profile
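
The applied configuration can be verified before launching any containers:

# lxc profile show ourk8s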

Check whether rong-2004 exists in the image store:

# lxc image list
+-----------+--------------+--------+-------------+---------+----------+------------------------------+
|   ALIAS   | FINGERPRINT  | PUBLIC | DESCRIPTION |  ARCH   |   SIZE   |         UPLOAD DATE          |
+-----------+--------------+--------+-------------+---------+----------+------------------------------+
| rong-2004 | f511553a81a9 | no     |             | aarch64 | 674.77MB | Jun 22, 2020 at 6:11am (UTC) |
+-----------+--------------+--------+-------------+---------+----------+------------------------------+

Create three LXC containers:

# lxc launch rong-2004 k8s1 --profile ourk8s && lxc launch rong-2004 k8s2 --profile ourk8s && lxc launch rong-2004 k8s3 --profile ourk8s
Creating k8s1
Starting k8s1
Creating k8s2
Starting k8s2
Creating k8s3
Starting k8s3

Wait about two minutes for the containers to finish starting:

# lxc ls
+---------+---------+----------------------------+-----------------------------------------------+------------+-----------+
|  NAME   |  STATE  |            IPV4            |                     IPV6                      |    TYPE    | SNAPSHOTS |
+---------+---------+----------------------------+-----------------------------------------------+------------+-----------+
| k8s1    | RUNNING | 10.230.146.83 (eth0)       | fd42:6fd0:9ed5:600b:216:3eff:fede:3897 (eth0) | PERSISTENT | 0         |
+---------+---------+----------------------------+-----------------------------------------------+------------+-----------+
| k8s2    | RUNNING | 10.230.146.201 (eth0)      | fd42:6fd0:9ed5:600b:216:3eff:fed1:ab8a (eth0) | PERSISTENT | 0         |
+---------+---------+----------------------------+-----------------------------------------------+------------+-----------+
| k8s3    | RUNNING | 10.230.146.33 (eth0)       | fd42:6fd0:9ed5:600b:216:3eff:fef8:f20c (eth0) | PERSISTENT | 0         |
+---------+---------+----------------------------+-----------------------------------------------+------------+-----------+

SSH into k8s1 and fetch the deployment files to run the installation:

# scp -r root@192.192.189.128:/media/sdd/20200617/Rong-v2006-arm .

Note: only the deployment files under 20200617/Rong-v2006-arm can be deployed inside LXC.
Note: the related changes have already been synced to the external network.
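
If SSH into the containers is not yet set up, a root shell can also be opened directly from the host:

# lxc exec k8s1 -- bash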

Update the IP configuration, then run install.sh basic:

root@node:/home/test/Rong-v2006-arm# cat hosts.ini 
[all]
focal-1 ansible_host=10.230.146.83 ip=10.230.146.83
focal-2 ansible_host=10.230.146.201 ip=10.230.146.201
focal-3 ansible_host=10.230.146.33 ip=10.230.146.33

[kube-deploy]
focal-1

[kube-master]
focal-1
focal-2

[etcd]
focal-1
focal-2
focal-3

[kube-node]
focal-1
focal-2
focal-3

[k8s-cluster:children]
kube-master
kube-node

[all:vars]
ansible_ssh_user=root
ansible_ssh_private_key_file=./.rong/deploy.key
root@node:/home/test/Rong-v2006-arm# ./install.sh basic
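
If the installation fails early, SSH connectivity to all hosts can be sanity-checked against the inventory above (a sketch, assuming Ansible is available on the deploy node):

root@node:/home/test/Rong-v2006-arm# ansible -i hosts.ini all -m ping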

After the installation finishes, check that everything is running properly:

root@node:/home/test/Rong-v2006-arm# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
focal-1   Ready    master   9m28s   v1.17.6
focal-2   Ready    master   8m1s    v1.17.6
focal-3   Ready    <none>   5m57s   v1.17.6
root@node:/home/test/Rong-v2006-arm# kubectl get pods 
No resources found in default namespace.
root@node:/home/test/Rong-v2006-arm# kubectl get pods  --all-namespaces
NAMESPACE     NAME                                          READY   STATUS             RESTARTS   AGE
kube-system   calico-kube-controllers-6df95cc8f5-n5b75      1/1     Running            0          4m38s
kube-system   calico-node-88xxf                             1/1     Running            1          5m31s
kube-system   calico-node-mnjpr                             1/1     Running            1          5m31s
kube-system   calico-node-sz4v8                             1/1     Running            1          5m31s
kube-system   coredns-76798d84dd-knq4j                      1/1     Running            0          3m54s
kube-system   coredns-76798d84dd-llrlt                      1/1     Running            0          4m11s
kube-system   dns-autoscaler-7b6dc7cdb9-2vgfs               1/1     Running            0          4m4s
kube-system   kube-apiserver-focal-1                        1/1     Running            0          9m12s
kube-system   kube-apiserver-focal-2                        1/1     Running            0          7m47s
kube-system   kube-controller-manager-focal-1               1/1     Running            0          9m12s
kube-system   kube-controller-manager-focal-2               1/1     Running            0          7m46s
kube-system   kube-proxy-2nms7                              1/1     Running            0          6m2s
kube-system   kube-proxy-9cwpm                              1/1     Running            0          6m
kube-system   kube-proxy-nkd5r                              1/1     Running            0          6m4s
kube-system   kube-scheduler-focal-1                        1/1     Running            0          9m12s
kube-system   kube-scheduler-focal-2                        1/1     Running            0          7m46s
kube-system   kubernetes-dashboard-5d5cb8976f-2hdtq         1/1     Running            0          4m1s
kube-system   kubernetes-metrics-scraper-747b4fd5cd-vhfr5   1/1     Running            0          3m59s
kube-system   metrics-server-849f86c88f-h6prj               1/2     CrashLoopBackOff   4          3m15s
kube-system   nginx-proxy-focal-3                           1/1     Running            0          6m8s
kube-system   tiller-deploy-56bc5dccc6-cfjkh                1/1     Running            0          3m34s
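
The metrics-server pod above is in CrashLoopBackOff; its events and container logs can be inspected to find the cause (pod name taken from the listing above):

root@node:/home/test/Rong-v2006-arm# kubectl -n kube-system describe pod metrics-server-849f86c88f-h6prj
root@node:/home/test/Rong-v2006-arm# kubectl -n kube-system logs metrics-server-849f86c88f-h6prj --all-containers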

Cleanup

After verification, containers that are no longer needed can be stopped and deleted:

# lxc stop XXXXX 
# lxc rm XXXXX
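
A running container can also be stopped and removed in one step:

# lxc rm XXXXX --force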

If the removal fails, clear the immutable attribute on the offending file inside the container, after which it can be deleted:

root@arm01:~/app# lxc rm kkkkk
Error: error removing /var/lib/lxd/storage-pools/default/containers/kkkkk: rm: cannot remove '/var/lib/lxd/storage-pools/default/containers/kkkkk/rootfs/etc/resolv.conf': Operation not permitted

root@arm01:~/app# chattr -i /var/lib/lxd/storage-pools/default/containers/kkkkk/rootfs/etc/resolv.conf
root@arm01:~/app# lxc rm kkkkk

kpartx issue

List the device mappings that would be created:

# kpartx -l sgoeuog.img

Create the device mappings (loop devices) for the image:

# kpartx -av gowgowu.img
/dev/loop0
/dev/mapper/loop0p1
...
/dev/mapper/loop0pn
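
When the mappings are no longer needed, remove them again:

# kpartx -dv gowgowu.img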