ProxmoxZfsTips

Steps

Before:

/images/2020_01_21_16_18_33_1171x255.jpg

SSH login:

Run ./MegaCli64 -LDInfo -LALL -aAll to view the VD information; VD0 (600G) must be left untouched.

/images/2020_01_21_16_20_59_871x328.jpg

Run ./MegaCli64 -PDList -aAll | grep -i adapter to get the adapter number:

/images/2020_01_21_16_22_31_783x47.jpg

Delete VD1-VD3:

# ./MegaCli64 -cfglddel -L1 -a0
# ./MegaCli64 -cfglddel -L2 -a0
# ./MegaCli64 -cfglddel -L3 -a0

Current VDs:

/images/2020_01_21_16_23_26_850x391.jpg

Check which physical disks the PDs correspond to:

# ./MegaCli64 -PDList -aAll | more
The two 600G disks are PD 0 and 1; the rest can be reconfigured freely.

Check how many disks there are:

# ./MegaCli64 -PDList -aAll |  grep 'Slot Number'

/images/2020_01_21_16_25_57_813x589.jpg

Note that slots 2 and 3 do not exist here; the slot numbers run from 4 to 27.

Get the Enclosure ID:

# ./MegaCli64 -PDList -aAll | grep 'Enclosure'
The Enclosure ID is 9.

Now create 24 RAID0 virtual drives:

# ./MegaCli64 -CfgLdAdd -r0 [9:4] -a0
# ./MegaCli64 -CfgLdAdd -r0 [9:5] -a0
......
# ./MegaCli64 -CfgLdAdd -r0 [9:26] -a0
# ./MegaCli64 -CfgLdAdd -r0 [9:27] -a0

Script:

/images/2020_01_21_16_30_40_450x557.jpg
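
A minimal equivalent of that script, assuming enclosure ID 9 and slots 4-27 as found above (verify against your own -PDList output):

#!/bin/bash
# create one RAID0 virtual drive per physical disk on adapter 0
for slot in $(seq 4 27); do
    ./MegaCli64 -CfgLdAdd -r0 "[9:${slot}]" -a0
done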

Use lsblk to view the disk information:

/images/2020_01_21_16_31_31_562x891.jpg

Delete the leftover partitions on sdb/sdn/sdr:

/images/2020_01_21_16_32_53_643x819.jpg
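
If you prefer the command line, one way to clear those disks (a sketch; double-check the device names against your own lsblk output first):

# wipefs -a /dev/sdb /dev/sdn /dev/sdr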

Add ZFS pool

Add it from the command line:

# zpool create -f -o ashift=12 vmpool raidz2 /dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh /dev/sdi
# zpool add -f -o ashift=12 vmpool raidz2 /dev/sdj /dev/sdk /dev/sdl /dev/sdm /dev/sdn /dev/sdo /dev/sdp /dev/sdq
# zpool add -f -o ashift=12 vmpool raidz2 /dev/sdr /dev/sds /dev/sdt /dev/sdu /dev/sdv /dev/sdw /dev/sdx /dev/sdy
# zpool list
NAME     SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
vmpool   130T  1.97M   130T         -     0%     0%  1.00x  ONLINE  -
# zfs list
NAME     USED  AVAIL  REFER  MOUNTPOINT
vmpool   819K  89.9T   205K  /vmpool
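
Optionally confirm that the three raidz2 vdevs look as expected:

# zpool status vmpool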

Add in Proxmox:

/images/2020_01_21_16_34_13_609x424.jpg

Set the parameters:

/images/2020_01_21_16_34_50_619x228.jpg

Available:

/images/2020_01_21_16_35_18_891x159.jpg

Usage:

/images/2020_01_21_16_37_23_712x533.jpg
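
Presumably the screenshot shows selecting the new ZFS storage when adding a VM disk; a CLI equivalent sketch ("vmpool" is assumed to be the storage ID configured above, the VM ID and sizes are arbitrary):

# qm create 100 --name testvm --memory 4096 --net0 virtio,bridge=vmbr0 --scsi0 vmpool:32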

TipsOnNextcloud

docker-compose.yml:

version: '3' 

services:

  proxy:
    image: jwilder/nginx-proxy:alpine
    labels:
      - "com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy=true"
    container_name: nextcloud-proxy
    networks:
      - nextcloud_network
    ports:
      - 0.0.0.0:80:80
      - 0.0.0.0:443:443
    volumes:
      - ./proxy/conf.d:/etc/nginx/conf.d:rw
      - ./proxy/vhost.d:/etc/nginx/vhost.d:rw
      - ./proxy/html:/usr/share/nginx/html:rw
      - ./proxy/certs:/etc/nginx/certs:ro
      - /etc/localtime:/etc/localtime:ro
      - /var/run/docker.sock:/tmp/docker.sock:ro
    restart: unless-stopped
  
  letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    container_name: nextcloud-letsencrypt
    depends_on:
      - proxy
    networks:
      - nextcloud_network
    volumes:
      - ./proxy/certs:/etc/nginx/certs:rw
      - ./proxy/vhost.d:/etc/nginx/vhost.d:rw
      - ./proxy/html:/usr/share/nginx/html:rw
      - /etc/localtime:/etc/localtime:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    restart: unless-stopped
  db:
    image: mariadb
    container_name: nextcloud-mariadb
    networks:
      - nextcloud_network
    volumes:
      - db:/var/lib/mysql
      - /etc/localtime:/etc/localtime:ro
    environment:
    # Create a root password for the mariadb instance.
      - MYSQL_ROOT_PASSWORD=engine123
    # Create a password for the nextcloud user. If you ever need to connect to the database manually, use the nextcloud user and this password.
      - MYSQL_PASSWORD=engine123
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
    restart: unless-stopped
  
  app:
    image: nextcloud:latest
    container_name: nextcloud-app
    networks:
      - nextcloud_network
    depends_on:
      - letsencrypt
      - proxy
      - db
    volumes:
      - nextcloud:/var/www/html
      - ./app/config:/var/www/html/config
      - ./app/custom_apps:/var/www/html/custom_apps
      - ./app/data:/var/www/html/data
      - ./app/themes:/var/www/html/themes
      - /etc/localtime:/etc/localtime:ro
    environment:
    # The VIRTUAL_HOST and LETSENCRYPT_HOST should use the same publicly reachable domain for your nextcloud instance.
      - VIRTUAL_HOST=mynextcloud.mooo.com
      - LETSENCRYPT_HOST=mynextcloud.mooo.com
    # This needs to be a real email address; Let's Encrypt will use it for your cert and to warn you about renewals.
      - LETSENCRYPT_EMAIL=feipyang@gmail.com
    restart: unless-stopped
  collab:
    image: collabora/code
    container_name: nextcloud-collab
    networks:
      - nextcloud_network
    depends_on:
      - proxy
      - letsencrypt
    cap_add:
     - MKNOD
    ports:
      - 0.0.0.0:9980:9980
    environment:
    # This needs to be the same as the domain you set for your app (ex: cloud.domain.tld).
    #- domain=cloud\\.DOMAIN\\.TLD
      - domain=mynextcloud\.mooo\.com
      - username=admin
    # Create a password for the Collabora Office admin page.
    #- password=CREATE-A-SECURE-PASSWORD-HERE
      - password=engine123
      - VIRTUAL_PORT=9980
      - extra_params=--o:ssl.enable=false --o:ssl.termination=true
#      - VIRTUAL_PROTO=https
#      - VIRTUAL_PORT=443
    # The VIRTUAL_HOST and LETSENCRYPT_HOST should use the same publicly reachable domain for your Collabora instance (ex: office.domain.tld).
      - VIRTUAL_HOST=myoffice.mooo.com
      - LETSENCRYPT_HOST=myoffice.mooo.com
    # This needs to be a real email address; Let's Encrypt will use it for your cert and to warn you about renewals.
      - LETSENCRYPT_EMAIL=feipyang@gmail.com
    restart: unless-stopped 
volumes:
  nextcloud:
  db: 
  
networks:
  nextcloud_network:
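
Bring the stack up from the directory that holds this compose file:

# docker-compose up -d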

For installing apps:

sudo docker cp Client.php fc18391f0a0a:/var/www/html/lib/private/Http/Client/Client.php
Change the timeout in Client.php from 30 to 300 seconds, then you will be able to install apps.
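
A sketch of the full round trip (the container ID is the one from above; adjust the sed pattern to however the 30-second timeout is written in your Nextcloud version):

sudo docker cp fc18391f0a0a:/var/www/html/lib/private/Http/Client/Client.php .
sed -i 's/=> 30,/=> 300,/' Client.php
sudo docker cp Client.php fc18391f0a0a:/var/www/html/lib/private/Http/Client/Client.php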

HarborArm64Issue

Issue

The harbor-log instance is launched first, but it complains:

You are required to change your password immediately (password expired)

This is because the container images we built have a 90-day password expiry limit baked in.
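
You can confirm the aging fields by dumping /etc/shadow from one of the images (the tag here is just an example; field 5 is the maximum password age in days):

# docker run --rm --user root --entrypoint cat goharbor/harbor-core:1.7.0-arm64 /etc/shadow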

Solution

Rebuild all of the harbor images:

root@node:~/harbor# cat harbor-core/Dockerfile 
FROM f9e2034f3a6d
COPY login.defs /etc/login.defs
COPY shadow /etc/shadow
root@node:~/harbor# cat harbor-core/shadow 
root:x:18074:0:99999:7:::
bin:x:18074:0:99999:7:::
daemon:x:18074:0:99999:7:::
messagebus:x:18074:0:99999:7:::
systemd-bus-proxy:x:18074:0:99999:7:::
systemd-journal-gateway:x:18074:0:99999:7:::
systemd-journal-remote:x:18074:0:99999:7:::
systemd-journal-upload:x:18074:0:99999:7:::
systemd-network:x:18074:0:99999:7:::
systemd-resolve:x:18074:0:99999:7:::
systemd-timesync:x:18074:0:99999:7:::
nobody:x:18074:0:99999:7:::
syslog:!:18074::::::
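
The login.defs copied alongside it is not shown here; presumably it just lifts the password-aging limits, along these lines:

# hypothetical /etc/login.defs fragment; the key change is removing the 90-day maximum
PASS_MAX_DAYS   99999
PASS_MIN_DAYS   0
PASS_WARN_AGE   7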

Then build the image with the following command:

# docker build -t goharbor/harbor-db:1.7.0-arm64 harbor-db/

We have to build all of the images:

  docker build -t goharbor/chartmuseum-photon:v0.7.1-1.7.0-arm64 chartmuseum-photon/
  docker build -t goharbor/redis-photon:1.7.0-arm64 redis-photon/
  docker build -t goharbor/clair-photon:v2.0.7-1.7.0-arm64 clair-photon/
  docker build -t  goharbor/notary-server-photon:v0.6.1-1.7.0-arm64 notary-server-photon/
  docker build -t goharbor/notary-signer-photon:v0.6.1-1.7.0-arm64 notary-signer-photon
  docker build -t goharbor/registry-photon:v2.6.2-1.7.0-arm64 registry-photon/
  docker build -t goharbor/harbor-registryctl:1.7.0-arm64 harbor-registryctl/
  docker build -t goharbor/nginx-photon:1.7.0-arm64 nginx-photon/
  docker build -t goharbor/harbor-jobservice:1.7.0-arm64 harbor-jobservice/
  docker build -t goharbor/harbor-core:1.7.0-arm64 harbor-core/
  docker build -t goharbor/harbor-portal:1.7.0-arm64 harbor-portal/
  docker build -t goharbor/harbor-adminserver:1.7.0-arm64 harbor-adminserver/
  docker build -t goharbor/harbor-db:1.7.0-arm64 harbor-db/
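
Afterwards, confirm that every arm64 tag is present:

# docker images | grep arm64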

WorkingTipsOnkubeadmsslTwo

Addendum

After the reboot, on the node whose certificates were renewed, verify that all nodes can still be listed, and show the currently valid tokens:

# kubectl get nodes
# kubeadm token list

If there is no valid token, create one manually:

# kubeadm token create

The token should look something like 6dihyb.d09sbgae8ph2atjw.

SSH to each worker node and rejoin it to the master:

# kubeadm join --token=<token from the previous step> <master node IP>:<port, 6443 by default> --node-name <this node's name>
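
For example, with the sample token above (all values here are illustrative):

# kubeadm join --token=6dihyb.d09sbgae8ph2atjw 192.192.185.63:6443 --node-name k8s-node-1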

After rejoining, verify on the master node that the join succeeded:

# kubectl get nodes

KubeadmSSLAdjust

Check

A cluster whose one-year certificates are about to expire:

root@node:/home/test# cd /etc/kubernetes/ssl
root@node:/etc/kubernetes/ssl# for i in `ls *.crt`; do openssl x509 -in $i -noout -dates; done | grep notAfter
notAfter=Jun 13 02:57:27 2020 GMT
notAfter=Jun 13 02:57:28 2020 GMT
notAfter=Jun 11 02:57:27 2029 GMT
notAfter=Jun 11 02:57:28 2029 GMT
notAfter=Jun 13 02:57:29 2020 GMT

Get the cluster information:

# kubectl get nodes -o wide
NAME           STATUS   ROLES    AGE    VERSION   INTERNAL-IP      EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
k8s-master-1   Ready    master   206d   v1.12.4   192.192.185.63   <none>        Ubuntu 16.04.4 LTS   4.4.0-116-generic   docker://18.6.1
k8s-master-2   Ready    node     89d    v1.12.4   192.192.185.64   <none>        Ubuntu 16.04.4 LTS   4.4.0-116-generic   docker://18.6.1
k8s-node-1     Ready    node     111d   v1.12.4   192.192.185.65   <none>        Ubuntu 16.04.6 LTS   4.15.0-45-generic   docker://18.6.1
k8s-node-2     Ready    node     206d   v1.12.4   192.192.185.66   <none>        Ubuntu 16.04.4 LTS   4.4.0-116-generic   docker://18.6.1
k8s-node-3     Ready    node     201d   v1.12.4   192.192.189.61   <none>        Ubuntu 16.04.4 LTS   4.4.0-116-generic   docker://18.6.1

renew apiserver certs:

# kubeadm alpha phase certs renew apiserver --config=/etc/kubernetes/kubeadm-config.v1alpha3.yaml
#  for i in `ls *.crt`; do openssl x509 -in $i -noout -dates; done | grep notAfter
notAfter=Jun 13 02:57:27 2020 GMT
notAfter=Jan  5 09:30:17 2021 GMT
notAfter=Jun 11 02:57:27 2029 GMT
notAfter=Jun 11 02:57:28 2029 GMT
notAfter=Jun 13 02:57:29 2020 GMT

# cd /etc/kubernetes/

# ln -s ssl pki

# kubeadm alpha phase certs renew apiserver --config=/etc/kubernetes/kubeadm-config.v1alpha3.yaml --cert-dir=/etc/kubernetes/ssl

# kubeadm alpha phase certs renew apiserver-kubelet-client  --config=/etc/kubernetes/kubeadm-config.v1alpha3.yaml --cert-dir=/etc/kubernetes/ssl

# kubeadm alpha phase certs renew front-proxy-client --config=/etc/kubernetes/kubeadm-config.v1alpha3.yaml --cert-dir=/etc/kubernetes/ssl
root@node:/etc/kubernetes/ssl# !2036
for i in `ls *.crt`; do openssl x509 -in $i -noout -dates; done | grep notAfter
notAfter=Jan  5 09:36:10 2021 GMT
notAfter=Jan  5 09:30:46 2021 GMT
notAfter=Jun 11 02:57:27 2029 GMT
notAfter=Jun 11 02:57:28 2029 GMT
notAfter=Jan  5 09:37:14 2021 GMT

kubeadm alpha phase kubeconfig all --apiserver-advertise-address=192.192.185.63
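
The renewed certs and kubeconfigs only take effect once the control-plane components reload them; a sketch of the usual follow-up on the master (assumes a standard kubeadm layout, adapt to your setup):

# cp /etc/kubernetes/admin.conf ~/.kube/config
# docker ps | grep -E 'kube-apiserver|kube-controller-manager|kube-scheduler' | awk '{print $1}' | xargs docker restart
# systemctl restart kubelet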