WorkingTipsInRonggraphInLXD

lxd environment

Install lxd (offline):

snap download core
snap download core18
snap download lxd
snap ack core18_1885.assert; snap ack core_10185.assert; snap ack lxd_17936.assert
snap install core18_1885.snap ; snap install core_10185.snap ; snap install lxd_17936.snap
dpkg -i ./lxd_1%3a0.9_all.deb   # the '%3a' in the downloaded filename is a URL-encoded ':'
which lxc
which lxd
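Both clients should also report a version (a quick sanity check):

lxd --version
lxc --version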

Show lxc images:

root@rong320-1:~/lxd# lxc image list
If this is your first time running LXD on this machine, you should also run: lxd init
To start your first instance, try: lxc launch ubuntu:18.04

+-------+-------------+--------+-------------+--------------+------+------+-------------+
| ALIAS | FINGERPRINT | PUBLIC | DESCRIPTION | ARCHITECTURE | TYPE | SIZE | UPLOAD DATE |
+-------+-------------+--------+-------------+--------------+------+------+-------------+

Download lxd images:

https://us.images.linuxcontainers.org/images/alpine/3.12/amd64/default/20201021_13:00/
Download rootfs.squashfs and lxd.tar.xz from there.
root@rong320-1:~/lxdimages# ls
lxd.tar.xz  rootfs.squashfs
root@rong320-1:~/lxdimages# lxc image import lxd.tar.xz rootfs.squashfs --alias alpine312
Image imported with fingerprint: 76560d125792d7710d70f41b060e81f0bd4d83f1cc4e8dbd43fc371e5dea27bf
root@rong320-1:~/lxdimages# lxc image list
+-----------+--------------+--------+------------------------------------------+--------------+-----------+--------+------------------------------+
|   ALIAS   | FINGERPRINT  | PUBLIC |               DESCRIPTION                | ARCHITECTURE |   TYPE    |  SIZE  |         UPLOAD DATE          |
+-----------+--------------+--------+------------------------------------------+--------------+-----------+--------+------------------------------+
| alpine312 | 76560d125792 | no     | Alpinelinux 3.12 x86_64 (20201021_13:00) | x86_64       | CONTAINER | 2.40MB | Oct 22, 2020 at 3:48am (UTC) |
+-----------+--------------+--------+------------------------------------------+--------------+-----------+--------+------------------------------+
Auto-configure lxd via a preseed file (see https://discuss.linuxcontainers.org/t/usage-of-lxd-init-preseed/1069/3 and https://lxd.readthedocs.io/en/latest/preseed/):
cat <<EOF | lxd init --preseed
config:
  core.https_address: 10.137.149.161:9199
  images.auto_update_interval: 15
networks:
- name: lxdbr0
  type: bridge
  config:
    ipv4.address: auto
    ipv6.address: none
EOF
root@rong320-1:~/lxdimages# cat storages.yml 
storage_pools:
- name: default
  driver: dir
  config:
    source: ""
root@rong320-1:~/lxdimages# lxd init --preseed < ./storages.yml

root@rong320-1:~/lxdimages# cat profiles.yml 
profiles:
- name: default
  devices:
    root:
      path: /
      pool: default
      type: disk
    eth0:
      nictype: bridged
      parent: lxdbr0
      type: nic
root@rong320-1:~/lxdimages# lxd init --preseed < profiles.yml

Now we can check the default lxd bridge (lxdbr0).
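For example, with the standard lxc/ip tooling:

# lxc network show lxdbr0
# ip addr show lxdbr0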

Docker/Docker-compose in alpine

Create a new profile named k8s, then launch with it:
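The profile's exact contents are not recorded in these notes. A minimal sketch that enables nested Docker (security.nesting is the key setting; security.privileged is a common but less safe shortcut):

# lxc profile create k8s
# lxc profile set k8s security.nesting true
# lxc profile set k8s security.privileged true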

# lxc launch alpine312 firstalpine -p k8s

Create the first alpine instance:

# lxc launch alpine312 firstalpine
Creating firstalpine
Starting firstalpine           
# lxc ls
+-------------+---------+---------------------+------+-----------+-----------+
|    NAME     |  STATE  |        IPV4         | IPV6 |   TYPE    | SNAPSHOTS |
+-------------+---------+---------------------+------+-----------+-----------+
| firstalpine | RUNNING | 10.31.47.210 (eth0) |      | CONTAINER | 0         |
+-------------+---------+---------------------+------+-----------+-----------+
root@rong320-1:~/lxdimages# lxc exec firstalpine /bin/sh
~ # cat /etc/issue
Welcome to Alpine Linux 3.12
Kernel \r on an \m (\l)

Configure apk repositories:

# echo "https://mirrors.aliyun.com/alpine/v3.12/main/" > /etc/apk/repositories
# echo "https://mirrors.aliyun.com/alpine/v3.12/community/" >> /etc/apk/repositories
# apk update
# apk add docker-engine docker-compose docker-cli

Create the cgroups-patch file under /etc/init.d:

#!/sbin/openrc-run

description="Mount the control groups for Docker"

depend()
{
    keyword -docker
    need sysfs cgroups
}

start()
{
    if [ -d /sys/fs/cgroup ]; then
        mkdir -p /sys/fs/cgroup/cpu,cpuacct
        mkdir -p /sys/fs/cgroup/net_cls,net_prio

        mount -n -t cgroup cgroup /sys/fs/cgroup/cpu,cpuacct -o rw,nosuid,nodev,noexec,relatime,cpu,cpuacct
        mount -n -t cgroup cgroup /sys/fs/cgroup/net_cls,net_prio -o rw,nosuid,nodev,noexec,relatime,net_cls,net_prio

        if ! mountinfo -q /sys/fs/cgroup/openrc; then
            local agent="${RC_LIBEXECDIR}/sh/cgroup-release-agent.sh"
            mkdir -p /sys/fs/cgroup/openrc
            mount -n -t cgroup -o none,nodev,noexec,nosuid,name=systemd,release_agent="$agent" openrc /sys/fs/cgroup/openrc
        fi
    fi

    return 0
}
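
Make the script executable so OpenRC can run it (a step these notes skip over):

# chmod +x /etc/init.d/cgroups-patch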

Add the service to the boot runlevel, patch the docker init script, and reboot:

# rc-update add cgroups-patch boot
# vim /etc/init.d/docker
.....
start_pre() {
        # skip the stock checkpath call; keep a no-op so start_pre stays defined
        #checkpath -f -m 0644 -o root:docker "$DOCKER_ERRFILE" "$DOCKER_OUTFILE"
        echo "skipping checkpath"
}
.....
# rc-service docker start
# rc-update add docker default
# reboot
After reboot, check that Docker came up.
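For example, using the standard client and OpenRC status commands:

# docker version
# rc-status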

Push files into the lxc instance:

# lxc file push -r podmanitems/ firstalpine/root/
Load all the image tarballs, then list them:
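A loop like this does the load inside the instance (assuming podmanitems/ holds docker-save tarballs; the .tar glob is a guess at the layout):

~ # for f in /root/podmanitems/*.tar; do docker load -i "$f"; done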
~ # docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
rong/ui             master              66ad16eb15c5        20 minutes ago      28.9MB
rong/server         master              8150777ead18        23 hours ago        301MB
rong/kobe           master              2d0a03d6cedb        2 days ago          231MB
rong/nginx          1.19.2-amd64        7e4d58f0e5f3        5 weeks ago         133MB
rong/webkubectl     v2.6.0-amd64        4aa634837fea        2 months ago        349MB
rong/mysql-server   8.0.21-amd64        8a3a24ad33be        3 months ago        366MB
# lxc file push ronggraph.tar firstalpine/root/

Then, inside the instance:

~ # tar xzvf /root/ronggraph.tar

Write an OpenRC start script for ronggraph:

#!/sbin/openrc-run
#
# author: Yusuke Kawatsu

workspace="/root/ronggraph"
cmdpath="/usr/bin/docker-compose"
prog="ronggraph"
lockfile="/var/lock/ronggraph"
pidfile="/var/run/ronggraph.pid"
PATH="$PATH:/usr/local/bin"


start() {
    [ -x $cmdpath ] || exit 5
    echo -n "Starting $prog: "

    cd $workspace
    $cmdpath up -d
    retval=$?
    echo
    # docker-compose detaches after bringing the stack up,
    # so there is no long-running PID to record
    [ $retval -eq 0 ] && touch $lockfile

    return $retval
}

stop() {
    [ -x $cmdpath ] || exit 5
    echo -n $"Stopping $prog: "

    cd $workspace
    $cmdpath down
    retval=$?
    echo
    [ $retval -eq 0 ] && rm -f $lockfile && rm -f $pidfile

    return $retval
}

restart() {
    stop
    sleep 3
    start
}

depend() {
    need docker
}
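
Save it as /etc/init.d/ronggraph inside the instance and make it executable:

# chmod +x /etc/init.d/ronggraph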

Now add ronggraph to the default runlevel and shut down:

# rc-update add ronggraph default
# halt

Save the current state as a reusable image:

root@rong320-1:~/lxdimages# lxc stop firstalpine
root@rong320-1:~/lxdimages# lxc publish --public firstalpine --alias=ronggraph
root@rong320-1:/mnt# lxc image ls
+-----------+--------------+--------+------------------------------------------+--------------+-----------+----------+------------------------------+
|   ALIAS   | FINGERPRINT  | PUBLIC |               DESCRIPTION                | ARCHITECTURE |   TYPE    |   SIZE   |         UPLOAD DATE          |
+-----------+--------------+--------+------------------------------------------+--------------+-----------+----------+------------------------------+
| alpine312 | 76560d125792 | no     | Alpinelinux 3.12 x86_64 (20201021_13:00) | x86_64       | CONTAINER | 2.40MB   | Oct 22, 2020 at 6:18am (UTC) |
+-----------+--------------+--------+------------------------------------------+--------------+-----------+----------+------------------------------+
| ronggraph | b31788790460 | yes    | Alpinelinux 3.12 x86_64 (20201021_13:00) | x86_64       | CONTAINER | 619.40MB | Oct 22, 2020 at 8:05am (UTC) |
+-----------+--------------+--------+------------------------------------------+--------------+-----------+----------+------------------------------+

Launch a new instance:

# lxc launch ronggraph ronggraph -p k8s
# 

Add port-forwarding rules (LXD proxy devices):

lxc config device add ronggraph myport80 proxy listen=tcp:0.0.0.0:80 connect=tcp:0.0.0.0:80
lxc config device add ronggraph myport443 proxy listen=tcp:0.0.0.0:443 connect=tcp:0.0.0.0:443
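
To verify the devices (lxc config device show is standard; the curl target assumes the service listens on port 80):

lxc config device show ronggraph
curl -I http://127.0.0.1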

arm64 workingtips

On a Raspberry Pi running Arch Linux ARM, install:

# pacman -Sy
# pacman -S lxc lxd
# systemctl enable lxd
# systemctl start lxd

Download images from:

https://us.images.linuxcontainers.org/images/alpine/3.12/arm64/default/20201022_13:00/
rootfs.squashfs
lxd.tar.xz
# lxc image import lxd.tar.xz rootfs.squashfs --alias alpine312
# lxd init --preseed < pre-rong/lxditems/lxd_snap/init.yaml
# lxc profile create k8s
# lxc profile edit k8s < pre-rong/lxditems/lxdimages/k8s.yaml
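
To confirm the profile applied:

# lxc profile show k8s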

/images/2020_10_23_09_55_43_629x378.jpg

Configure lxc for running:

https://wiki.archlinux.org/index.php/Linux_Containers

Inside the alpine instance, point apk at a mirror:

~ # sed -i 's/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g' /etc/apk/repositories
~ # cat /etc/apk/repositories 
http://mirrors.ustc.edu.cn/alpine/v3.12/main
http://mirrors.ustc.edu.cn/alpine/v3.12/community
Install docker/docker-compose and modify their startup scripts as in the amd64 section above.

lxc publish will take a very long time!

# lxc ls
+------+---------+------------------------------+------+-----------+-----------+
| NAME |  STATE  |             IPV4             | IPV6 |   TYPE    | SNAPSHOTS |
+------+---------+------------------------------+------+-----------+-----------+
| king | RUNNING | 172.18.0.1 (br-74a26d2404f6) |      | CONTAINER | 0         |
|      |         | 172.17.0.1 (docker0)         |      |           |           |
|      |         | 10.150.132.185 (eth0)        |      |           |           |
+------+---------+------------------------------+------+-----------+-----------+
# lxc publish --public king --alias=ronggraph
# lxc image ls
+-----------+--------------+--------+-------------------------------------------+--------------+-----------+-----------+-------------------------------+
|   ALIAS   | FINGERPRINT  | PUBLIC |                DESCRIPTION                | ARCHITECTURE |   TYPE    |   SIZE    |          UPLOAD DATE          |
+-----------+--------------+--------+-------------------------------------------+--------------+-----------+-----------+-------------------------------+
| alpine312 | 58ebec92505e | no     | Alpinelinux 3.12 aarch64 (20201022_13:00) | aarch64      | CONTAINER | 2.20MB    | Oct 23, 2020 at 1:52am (UTC)  |
+-----------+--------------+--------+-------------------------------------------+--------------+-----------+-----------+-------------------------------+
| ronggraph | 607287f518d4 | yes    | Alpinelinux 3.12 aarch64 (20201022_13:00) | aarch64      | CONTAINER | 2655.15MB | Oct 23, 2020 at 10:13am (UTC) |
+-----------+--------------+--------+-------------------------------------------+--------------+-----------+-----------+-------------------------------+
# lxc image export ronggraph .
# ls -l *.tar.gz
-rw-r--r--  1 root  root   2784123072 Oct 26 00:40 607287f518d40783ed968cd2f2434fba101d4332ccc16f1e66cfb43049208d57.tar.gz

Transfer the tar.gz to the arm64 server and import it:

# /snap/bin/lxc image import lxditems/lxdimages/607287f518d40783ed968cd2f2434fba101d4332ccc16f1e66cfb43049208d57.tar.gz --alias ronggraph

koDatabase

ko admin

Use the following commands to restore the admin user's privileges:

# podman exec -it rong_mysql /bin/bash

SQL operations:

bash-4.2# mysql -uroot -p
Enter password: 
mysql> use ko
mysql> update ko_user set is_active=1 where name='admin';
mysql> update ko_user set is_admin=1 where name='admin';
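A quick check before leaving the mysql shell (same table and columns as above):

mysql> select name, is_active, is_admin from ko_user where name='admin';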

Now go back to the login page and log in as the admin user.

ko cluster import

To import a cluster into ko, fetch the dashboard service-account token:

root@focal-1:/mnt/Rong_RongGraph/rong/4_addons# kubectl get sa -n kube-system | grep dashboard
kubernetes-dashboard                 1         10m
root@focal-1:/mnt/Rong_RongGraph/rong/4_addons# kubectl get secret -n kube-system | grep dashboard
kubernetes-dashboard-certs                       Opaque                                0      10m
kubernetes-dashboard-csrf                        Opaque                                1      10m
kubernetes-dashboard-key-holder                  Opaque                                2      10m
kubernetes-dashboard-token-mpf77                 kubernetes.io/service-account-token   3      10m
root@focal-1:/mnt/Rong_RongGraph/rong/4_addons# kubectl -n kube-system describe secrets kubernetes-dashboard-token-mpf77
Name:         kubernetes-dashboard-token-mpf77
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
              kubernetes.io/service-account.uid: ff6cac3e-d90c-4990-bb90-e245ac762696

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      xxxxxxx

KubeOVNWorkingTips

Server side (k8s)

kkk.yaml defines the subnet to be created via kube-ovn:

apiVersion: kubeovn.io/v1
kind: Subnet
metadata:
  name: etc
spec:
  protocol: IPv4
  default: false
  namespaces:
  - etl
  - etl1
  cidrBlock: 100.64.0.0/16
  gateway: 100.64.0.1
  excludeIps:
  - 100.64.0.1
  private: false
  gatewayType: distributed
  natOutgoing: false

Create the subnet via kubectl create -f kkk.yaml, then view it:

# kubectl get subnet
NAME   PROTOCOL   CIDR            PRIVATE   NAT     DEFAULT   GATEWAYTYPE   USED   AVAILABLE
etc    IPv4       100.64.0.0/16   false     false   false     distributed   1      65532

Create namespaces via kubectl create ns etl and kubectl create ns etl1, then run a pod in these two namespaces:

# kubectl run nginxetl --image=nginx:1.17 --namespace etl
# kubectl get pod -n etl -o wide
The pod's IP address (100.64.0.3 here) is allocated from the subnet's cidrBlock.

Client side (machines outside the cluster)

Add a route on the client, pointing at a cluster node as the gateway:

# route add -net 100.64.0.0/16 gw 192.192.xxx.xxx
# curl 100.64.0.3
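route here comes from the legacy net-tools; the iproute2 equivalent of the same rule is:

# ip route add 100.64.0.0/16 via 192.192.xxx.xxx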

OpenWRTBox

Tips

The box runs ATTITUDE ADJUSTMENT (12.09, r36088), whose old SSH daemon only offers legacy key exchange, so we have to log in via:

$ ssh -oKexAlgorithms=+diffie-hellman-group1-sha1  root@192.168.2.1
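To avoid retyping the option, the algorithm can be pinned in ~/.ssh/config (a minimal sketch):

$ cat >> ~/.ssh/config <<EOF
Host 192.168.2.1
    KexAlgorithms +diffie-hellman-group1-sha1
EOF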

Luckily DHCP is enabled on this device! Otherwise it would effectively be bricked.

Change the wifi settings under Network->Wifi->Interface Configuration->General Setup:

/images/2020_09_28_09_49_31_986x277.jpg

etcdRecovery

On the primary node:

root@newnode-1:/home/test# ETCDCTL_API=3 etcdctl --endpoints=https://192.168.122.21:2379 --cacert="/etc/ssl/etcd/ssl/ca.pem" --cert="/etc/ssl/etcd/ssl/member-newnode-1.pem" --key="/etc/ssl/etcd/ssl/member-newnode-1-key.pem" member list
4047613ce64ac480, started, etcd2, https://192.168.122.58:2380, https://192.168.122.58:2379
ac76e9faf75cf70f, started, etcd3, https://192.168.122.75:2380, https://192.168.122.75:2379
e99611c964d08e01, started, etcd1, https://192.168.122.21:2380, https://192.168.122.21:2379
root@newnode-1:/home/test# ETCDCTL_API=2 etcdctl --endpoints=https://192.168.122.21:2379 --ca-file="/etc/ssl/etcd/ssl/ca.pem" --cert-file="/etc/ssl/etcd/ssl/member-newnode-1.pem" --key-file="/etc/ssl/etcd/ssl/member-newnode-1-key.pem" cluster-health
member 4047613ce64ac480 is healthy: got healthy result from https://192.168.122.58:2379
failed to check the health of member ac76e9faf75cf70f on https://192.168.122.75:2379: Get https://192.168.122.75:2379/health: dial tcp 192.168.122.75:2379: connect: connection refused
member ac76e9faf75cf70f is unreachable: [https://192.168.122.75:2379] are all unreachable
member e99611c964d08e01 is healthy: got healthy result from https://192.168.122.21:2379
cluster is degraded

Remove the faulty member:

root@newnode-1:/home/test# ETCDCTL_API=2 etcdctl --endpoints=https://192.168.122.21:2379 --ca-file="/etc/ssl/etcd/ssl/ca.pem" --cert-file="/etc/ssl/etcd/ssl/member-newnode-1.pem" --key-file="/etc/ssl/etcd/ssl/member-newnode-1-key.pem" member remove ac76e9faf75cf70f
Removed member ac76e9faf75cf70f from cluster
root@newnode-1:/home/test# ETCDCTL_API=2 etcdctl --endpoints=https://192.168.122.21:2379 --ca-file="/etc/ssl/etcd/ssl/ca.pem" --cert-file="/etc/ssl/etcd/ssl/member-newnode-1.pem" --key-file="/etc/ssl/etcd/ssl/member-newnode-1-key.pem" member list
4047613ce64ac480: name=etcd2 peerURLs=https://192.168.122.58:2380 clientURLs=https://192.168.122.58:2379 isLeader=true
e99611c964d08e01: name=etcd1 peerURLs=https://192.168.122.21:2380 clientURLs=https://192.168.122.21:2379 isLeader=false

On the faulty node:

systemctl stop etcd
mv /var/lib/etcd /var/lib/etcd.back
mkdir /var/lib/etcd
systemctl start etcd

Re-add the member (etcdctl prints the environment the new member must start with):

root@newnode-1:/home/test# ETCDCTL_API=2 etcdctl --endpoints=https://192.168.122.21:2379 --ca-file="/etc/ssl/etcd/ssl/ca.pem" --cert-file="/etc/ssl/etcd/ssl/member-newnode-1.pem" --key-file="/etc/ssl/etcd/ssl/member-newnode-1-key.pem" member add etcd3 https://192.168.122.75:2380
Added member named etcd3 with ID 318e07d1cc0d3933 to cluster

ETCD_NAME="etcd3"
ETCD_INITIAL_CLUSTER="etcd3=https://192.168.122.75:2380,etcd2=https://192.168.122.58:2380,etcd1=https://192.168.122.21:2380"
ETCD_INITIAL_CLUSTER_STATE="existing"
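Those variables belong in the faulty node's etcd environment before it rejoins; the exact file depends on the deployment (a kubespray-style setup reads /etc/etcd.env):

vim /etc/etcd.env         # set ETCD_NAME / ETCD_INITIAL_CLUSTER / ETCD_INITIAL_CLUSTER_STATE as printed above
systemctl restart etcd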
root@newnode-1:/home/test# ETCDCTL_API=2 etcdctl --endpoints=https://192.168.122.21:2379 --ca-file="/etc/ssl/etcd/ssl/ca.pem" --cert-file="/etc/ssl/etcd/ssl/member-newnode-1.pem" --key-file="/etc/ssl/etcd/ssl/member-newnode-1-key.pem" member list
318e07d1cc0d3933[unstarted]: peerURLs=https://192.168.122.75:2380
4047613ce64ac480: name=etcd2 peerURLs=https://192.168.122.58:2380 clientURLs=https://192.168.122.58:2379 isLeader=true
e99611c964d08e01: name=etcd1 peerURLs=https://192.168.122.21:2380 clientURLs=https://192.168.122.21:2379 isLeader=false

If the member stays in the unstarted state, go to the faulty node and:

systemctl stop etcd
rm -rf /var/lib/etcd/member
systemctl start etcd

Back on the primary node, check whether the cluster has recovered:

root@newnode-1:/home/test# ETCDCTL_API=2 etcdctl --endpoints=https://192.168.122.21:2379 --ca-file="/etc/ssl/etcd/ssl/ca.pem" --cert-file="/etc/ssl/etcd/ssl/member-newnode-1.pem" --key-file="/etc/ssl/etcd/ssl/member-newnode-1-key.pem" member list
4047613ce64ac480: name=etcd2 peerURLs=https://192.168.122.58:2380 clientURLs=https://192.168.122.58:2379 isLeader=true
531c8ba1dbabce70: name=etcd3 peerURLs=https://192.168.122.75:2380 clientURLs=https://192.168.122.75:2379 isLeader=false
e99611c964d08e01: name=etcd1 peerURLs=https://192.168.122.21:2380 clientURLs=https://192.168.122.21:2379 isLeader=false
root@newnode-1:/home/test# ETCDCTL_API=2 etcdctl --endpoints=https://192.168.122.21:2379 --ca-file="/etc/ssl/etcd/ssl/ca.pem" --cert-file="/etc/ssl/etcd/ssl/member-newnode-1.pem" --key-file="/etc/ssl/etcd/ssl/member-newnode-1-key.pem" cluster-health
member 4047613ce64ac480 is healthy: got healthy result from https://192.168.122.58:2379
member 531c8ba1dbabce70 is healthy: got healthy result from https://192.168.122.75:2379
member e99611c964d08e01 is healthy: got healthy result from https://192.168.122.21:2379
cluster is healthy