Aug 11, 2021
Technologyflash
Create a new partition:
Create new unknown partitions, then a userdata partition:
Layout:
p1: 128MB for boot (fat32, boot & lba)
p2: 768MB for /system
p3: 128MB for /vendor
p4: remaining space for /data (ext4)
Manage flags:
Final layout:
Write steps
Write via following steps:
➜ ~ cd rpi4
➜ rpi4 ls
bcm2711-rpi-4-b.dtb boot ramdisk.img system.img vc4-kms-v3d-pi4.dtbo vendor.img zImage
➜ rpi4 sudo dd if=system.img of=/dev/sdb2 bs=1M
768+0 records in
768+0 records out
805306368 bytes (805 MB, 768 MiB) copied, 12.812 s, 62.9 MB/s
➜ rpi4 sudo dd if=vendor.img of=/dev/sdb3 bs=1M
128+0 records in
128+0 records out
134217728 bytes (134 MB, 128 MiB) copied, 2.25087 s, 59.6 MB/s
➜ rpi4 sudo mount /dev/sdb1 /mnt
➜ rpi4 sudo cp boot/* /mnt
➜ rpi4 sudo cp zImage bcm2711-rpi-4-b.dtb ramdisk.img /mnt
➜ rpi4 sudo mkdir /mnt/overlays
➜ rpi4 sudo cp vc4-kms-v3d-pi4.dtbo /mnt/overlays
➜ rpi4 sudo sync
Aug 10, 2021
TechnologyWorkingSteps
Operation steps:
# adb connect 192.168.1.113
already connected to 192.168.1.113:5555
# adb shell
rpi4:/
Or:
# adb root
restarting adbd as root
# adb shell
rpi4:/ # ls
acct boot charger data dev init.environ.rc init.usb.rc mnt proc res sepolicy system vendor_service_contexts
apex bugreports config debug_ramdisk etc init.rc init.zygote32.rc odm product sbin storage ueventd.rc
bin cache d default.prop init init.usb.configfs.rc lost+found oem product_services sdcard sys vendor
rpi4:/ # exit
OR:
adb remount
remount succeeded
➜ Downloads adb shell
rpi4:/ # cd /system/bin/
rpi4:/system/bin # touch fff
rpi4:/system/bin # rm fff
rpi4:/system/bin #
Shutdown the phone:
sudo adb shell reboot -p
Aug 5, 2021
Technology目标
系统化说明如何在vdi设备和idv设备上开启sddm的nested模式下Multiseat登录支持。
环境准备
vdi设备信息如下:
Intel(R) Celeron(R) CPU J1900 @ 1.99GHz
# free -m
总计 已用 空闲 共享 缓冲/缓存 可用
内存: 3826 487 1676 317 1662 2758
交换: 0 0 0
# df -h
文件系统 容量 已用 可用 已用% 挂载点
udev 1.9G 0 1.9G 0% /dev
tmpfs 383M 872K 382M 1% /run
/dev/sda5 27G 5.1G 21G 20% /
# cat /etc/issue
Ubuntu 18.04.5 LTS \n \l
# uname -a
Linux xxxx 5.3.0-24-generic #26~18.04.2-Ubuntu SMP Tue Nov 26 12:34:22 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
安装必要的包:
# sudo apt-get update -y
# sudo apt-get upgrade -y
# sudo apt-get install -y sddm xfce4 unzip autoconf automake libtool pkg-config build-essential
# sudo apt-get install -y x11proto-dev xserver-xorg-dev libxcb-util-dev libxcb-icccm4-dev libxcb-image0-dev libxcb-shm0-dev libxcb-randr0-dev vim cmake cmake-extras extra-cmake-modules libpam-dev libxcb-xkb-dev qt5-default libqt5qml5 qt5-qmltooling-plugins qtdeclarative5-dev qttools5-dev xutils-dev
编译相关包
编译xf86-video-nested
:
$ git clone https://github.com/smemc/xf86-video-nested.git
$ cd xf86-video-nested
$ ./autogen.sh
$ ./configure --prefix=/usr
$ make -j2
$ sudo make install
编译sddm-nested
:
$ git clone https://github.com/purplepalmdash/sddm-nested-multiseat.git
$ cd sddm-nested-multiseat
$ mkdir build && cd build
$ cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release -Wno-dev ..
$ make -j2
$ sudo make install
$ sudo install -v -dm755 -o sddm -g sddm /var/lib/sddm
$ sddm --example-config > sddm.example.conf
$ sudo cp -v sddm.example.conf /etc/sddm.conf
生成配置文件:
cat > /etc/pam.d/sddm << "EOF" &&
# Begin /etc/pam.d/sddm
auth requisite pam_nologin.so
auth required pam_env.so
auth required pam_succeed_if.so uid >= 1000 quiet
auth include common-auth
account include common-account
password include common-password
session required pam_limits.so
session include common-session
# End /etc/pam.d/sddm
EOF
cat > /etc/pam.d/sddm-autologin << "EOF" &&
# Begin /etc/pam.d/sddm-autologin
auth requisite pam_nologin.so
auth required pam_env.so
auth required pam_succeed_if.so uid >= 1000 quiet
auth required pam_permit.so
account include common-account
password required pam_deny.so
session required pam_limits.so
session include common-session
# End /etc/pam.d/sddm-autologin
EOF
cat > /etc/pam.d/sddm-greeter << "EOF"
# Begin /etc/pam.d/sddm-greeter
auth required pam_env.so
auth required pam_permit.so
account required pam_permit.so
password required pam_deny.so
session required pam_unix.so
-session optional pam_systemd.so
# End /etc/pam.d/sddm-greeter
EOF
配置分屏
定义出Intel显卡的输出口:
# vim /etc/X11/xorg.conf.d/20-intel.conf
Section "Device"
Identifier "Intel Graphics"
Driver "intel"
Option "AccelMethod" "sna"
Option "TearFree" "true"
Option "DRI" "3"
Option "Monitor-VGA" "VGA"
Option "Monitor-HDMI2" "HDMI2"
EndSection
定义出显示器配置:
Section "Monitor"
Identifier "VGA"
EndSection
Section "Monitor"
Identifier "HDMI2"
Option "Position" "1920 0"
Option "PreferredMode" "1920x1080"
EndSection
Section "Screen"
Identifier "VGA"
Monitor "VGA"
SubSection "Display"
Depth 24
Modes "1920x1080"
EndSubSection
EndSection
Section "Screen"
Identifier "HDMI2"
Monitor "HDMI2"
SubSection "Display"
Depth 24
Modes "1920x1080"
EndSubSection
EndSection
Section "ServerLayout"
Identifier "L1"
Screen "VGA" 0 0
Screen "HDMI2" 1920 0
Option "BlankTime" "0"
Option "StandbyTime" "0"
Option "SuspendTime" "0"
Option "OffTime" "0"
EndSection
Section "ServerFlags"
Option "BlankTime" "0"
Option "StandbyTime" "0"
Option "SuspendTime" "0"
Option "OffTime" "0"
EndSection
Section "Extensions"
Option "DPMS" "Disable"
EndSection
定义出seat1
和seat2
的配置:
# vim /etc/X11/seat1.conf
Section "Module"
Load "shadow"
Load "fb"
EndSection
Section "Device"
Identifier "seat1"
Driver "nested"
EndSection
Section "Screen"
Identifier "Screen1"
Device "seat1"
DefaultDepth 24
SubSection "Display"
Depth 24
#Modes "3840x2160"
Modes "1920x1080"
EndSubSection
Option "Origin" "1920 0"
EndSection
Section "ServerLayout"
Identifier "Nested"
Screen "Screen1"
Option "AllowEmptyInput" "true"
Option "BlankTime" "0"
Option "StandbyTime" "0"
Option "SuspendTime" "0"
Option "OffTime" "0"
EndSection
Section "ServerFlags"
Option "BlankTime" "0"
Option "StandbyTime" "0"
Option "SuspendTime" "0"
Option "OffTime" "0"
EndSection
# vim /etc/X11/seat2.conf
Section "Module"
Load "shadow"
Load "fb"
EndSection
Section "Device"
Identifier "seat2"
Driver "nested"
EndSection
Section "Screen"
Identifier "Screen1"
Device "seat2"
DefaultDepth 24
SubSection "Display"
Depth 24
Modes "1920x1080"
EndSubSection
Option "Origin" "0 0"
EndSection
Section "ServerLayout"
Identifier "Nested"
Screen "Screen1"
Option "AllowEmptyInput" "true"
Option "BlankTime" "0"
Option "StandbyTime" "0"
Option "SuspendTime" "0"
Option "OffTime" "0"
EndSection
Section "ServerFlags"
Option "BlankTime" "0"
Option "StandbyTime" "0"
Option "SuspendTime" "0"
Option "OffTime" "0"
EndSection
定义出USB口与seat的映射关系(注意前置的口USB编号与机箱后部的USB编号的对应关系):
# vim /etc/udev/rules.d/70-seat.rules
SUBSYSTEM=="input", DEVPATH=="/devices/pci0000:00/0000:00:14.0/usb1/1-1/*", TAG+="seat", ENV{ID_SEAT}="seat2", TAG+="master-of-seat", PROGRAM="/usr/bin/sed -n 's/.*startseat=\([^ ]*\).*/\1/p' /proc/cmdline", RESULT=="true"
SUBSYSTEM=="input", DEVPATH=="/devices/pci0000:00/0000:00:14.0/usb1/1-4/*", TAG+="seat", ENV{ID_SEAT}="seat2", TAG+="master-of-seat", PROGRAM="/usr/bin/sed -n 's/.*startseat=\([^ ]*\).*/\1/p' /proc/cmdline", RESULT=="true"
SUBSYSTEM=="input", ATTRS{name}=="Power Button", TAG+="seat", ENV{ID_SEAT}="seat2", TAG+="master-of-seat", PROGRAM="/usr/bin/sed -n 's/.*startseat=\([^ ]*\).*/\1/p' /proc/cmdline", RESULT=="true"
#SUBSYSTEM=="input", DEVPATH=="/devices/pci0000:00/0000:00:14.0/usb1/1-9/*", TAG+="seat", ENV{ID_SEAT}="seat1", TAG+="master-of-seat", PROGRAM="/usr/bin/sed -n 's/.*startseat=\([^ ]*\).*/\1/p' /proc/cmdline", RESULT=="true"
SUBSYSTEM=="input", DEVPATH=="/devices/pci0000:00/0000:00:14.0/usb1/1-2/*", TAG+="seat", ENV{ID_SEAT}="seat1", TAG+="master-of-seat", PROGRAM="/usr/bin/sed -n 's/.*startseat=\([^ ]*\).*/\1/p' /proc/cmdline", RESULT=="true"
#SUBSYSTEM=="input", DEVPATH=="/devices/pci0000:00/0000:00:14.0/usb1/1-6/*", TAG+="seat", ENV{ID_SEAT}="seat1", TAG+="master-of-seat", PROGRAM="/usr/bin/sed -n 's/.*startseat=\([^ ]*\).*/\1/p' /proc/cmdline", RESULT=="true"
SUBSYSTEM=="input", ATTRS{name}=="Sleep Button", TAG+="seat", ENV{ID_SEAT}="seat1", TAG+="master-of-seat", PROGRAM="/usr/bin/sed -n 's/.*startseat=\([^ ]*\).*/\1/p' /proc/cmdline", RESULT=="true"
开启multi-seat:
# vim /etc/default/grub
.....
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash startseat=true"
GRUB_CMDLINE_LINUX="startseat=true"
....
# update-grub2
重启后验证:
# cat /proc/cmdline
BOOT_IMAGE=/boot/vmlinuz-5.3.0-24-generic root=UUID=50368a13-e1d7-4699-8b3b-b5ffc1d148e6 ro startseat=true quiet splash startseat=true vt.handoff=1
配置多用户登录
创建两个用户seat1
和seat2
:
# useradd -m seat1
# useradd -m seat2
# passwd seat1
# passwd seat2
配置其自动登录([Autologin]
内只保留如下所示部分):
# cat /etc/sddm.conf | more
[Autologin]
# Whether sddm should automatically log back into sessions when they exit
Relogin=false,false
SeatName=seat1,seat2
#Session=awesome,awesome
Session=xfce,xfce
User=seat1,seat2
....
Awesome自启动项
在awesome
的情景下,我们配置其登录方式为:
# su seat1
$ /bin/bash
$ mkdir -p ~/.config/awesome
$ cp /etc/xdg/awesome/rc.lua ~/.config/awesome
$ vim ~/.config/awesome/autorun.sh
#!/usr/bin/env bash
# Autostart hook spawned from awesome's rc.lua: launch the cloud-desktop
# client once per session, after giving the desktop a moment to settle.

# run CMD [ARGS...]
# Start CMD in the background unless a process whose full command line
# matches CMD is already running (pgrep -f matches the whole cmdline).
run() {
  # Quote "$1" so the pattern survives word-splitting/globbing;
  # '--' stops pgrep from treating a leading '-' as an option;
  # silence pgrep's PID output — only its exit status matters here.
  if ! pgrep -f -- "$1" >/dev/null; then
    # "$@" keeps every argument as a separate word.
    "$@" &
  fi
}

# Delay so the session is fully up, then start the client (guarded by run).
# NOTE(review): the original line was `run sleep3 && /opt/...`, which tried
# to execute the nonexistent command `sleep3` and keyed the pgrep guard on
# the wrong process; the client also ran in the foreground. Fixed to match
# the evident intent: wait 3 s, then background the client if not running.
sleep 3
run /opt/ctg/CtyunClouddeskUniversal/CtyunClouddeskUniversal
$ vim ~/.config/awesome/rc.lua
最后一行添加:
awful.spawn.with_shell("~/.config/awesome/autorun.sh")
$ chmod 777 ~/.config/awesome/autorun.sh
$ rm -rf /tmp/awesome/
$ cp -r ~/.config/awesome /tmp
$ exit
exit
$ exit
# su seat2
$ /bin/bash
$ cp -r /tmp/awesome/ ~/.config/
$ chmod 777 ~/.config/awesome/autorun.sh
$ exit
exit
$ exit
现在重启后即可看到结果,其结果表现为,两路显示独立输出且进入到idv登录前界面。
Jul 21, 2021
Technology环境
两台虚拟机用来模拟现实环境中的双节点(可以拓展到多节点)场景下的物理机器关机导致的LXD容器的迁移情况。
机器配置(以mig2为例):
root@mig2:/home/test# cat /etc/issue
Ubuntu 20.04.2 LTS \n \l
root@mig2:/home/test# free -g
total used free shared buff/cache available
Mem: 9 0 8 0 0 9
Swap: 0 0 0
root@mig2:/home/test# lxd --version
4.0.7
步骤
mig1节点上初始化:
root@mig1:/home/test# lxd init
Would you like to use LXD clustering? (yes/no) [default=no]: yes
What IP address or DNS name should be used to reach this node? [default=192.168.89.11]:
Are you joining an existing cluster? (yes/no) [default=no]: no
What name should be used to identify this node in the cluster? [default=mig1]:
Setup password authentication on the cluster? (yes/no) [default=no]: yes
Trust password for new clients:
Again:
Do you want to configure a new local storage pool? (yes/no) [default=yes]:
Name of the storage backend to use (lvm, zfs, btrfs, dir) [default=zfs]:
Create a new ZFS pool? (yes/no) [default=yes]:
Would you like to use an existing empty block device (e.g. a disk or partition)? (yes/no) [default=no]:
Size in GB of the new loop device (1GB minimum) [default=30GB]:
Do you want to configure a new remote storage pool? (yes/no) [default=no]:
Would you like to connect to a MAAS server? (yes/no) [default=no]:
Would you like to configure LXD to use an existing bridge or host interface? (yes/no) [default=no]:
Would you like to create a new Fan overlay network? (yes/no) [default=yes]:
What subnet should be used as the Fan underlay? [default=auto]:
Would you like stale cached images to be updated automatically? (yes/no) [default=yes]
Would you like a YAML "lxd init" preseed to be printed? (yes/no) [default=no]:
mig2节点加入集群:
root@mig2:/home/test# lxd init
Would you like to use LXD clustering? (yes/no) [default=no]: yes
What IP address or DNS name should be used to reach this node? [default=192.168.89.12]:
Are you joining an existing cluster? (yes/no) [default=no]: yes
Do you have a join token? (yes/no) [default=no]: no
What name should be used to identify this node in the cluster? [default=mig2]:
IP address or FQDN of an existing cluster node: 192.168.89.11
Cluster fingerprint: 75ee6a1962985e0262d6bea9f95d554f197719cca19820671856280fe0d2e28b
You can validate this fingerprint by running "lxc info" locally on an existing node.
Is this the correct fingerprint? (yes/no) [default=no]: yes
Cluster trust password:
All existing data is lost when joining a cluster, continue? (yes/no) [default=no] yes
Choose "zfs.pool_name" property for storage pool "local":
Choose "size" property for storage pool "local": 30GB
Choose "source" property for storage pool "local":
Would you like a YAML "lxd init" preseed to be printed? (yes/no) [default=no]:
root@mig2:/home/test#
创建成功后,检查cluster情况:
# root@mig2:/home/test# lxc cluster list
To start your first instance, try: lxc launch ubuntu:18.04
+------+----------------------------+----------+--------+-------------------+--------------+
| NAME | URL | DATABASE | STATE | MESSAGE | ARCHITECTURE |
+------+----------------------------+----------+--------+-------------------+--------------+
| mig1 | https://192.168.89.11:8443 | YES | ONLINE | Fully operational | x86_64 |
+------+----------------------------+----------+--------+-------------------+--------------+
| mig2 | https://192.168.89.12:8443 | YES | ONLINE | Fully operational | x86_64 |
+------+----------------------------+----------+--------+-------------------+--------------+