TestVirtio

创建:

qemu-img create -f qcow2 testvirtio.qcow2 80G

挂载:

/images/2024_02_22_19_46_58_351x205.jpg

进入SATA启动的win10系统后, 磁盘管理中可看(磁盘1, 对应F盘):

/images/2024_02_22_19_47_59_732x561.jpg

关机,将SATA盘切换为virtio.

切换前:

/images/2024_02_22_19_49_01_657x304.jpg

添加:

/images/2024_02_22_19_49_27_440x207.jpg

添加后:

/images/2024_02_22_19_49_40_522x411.jpg

更换启动顺序:

/images/2024_02_22_19_50_26_493x439.jpg

启动蓝屏:

/images/2024_02_22_19_50_39_897x661.jpg

设备管理器中确实有virtio的驱动,但是启动时切换会蓝屏:

/images/2024_02_22_19_52_02_359x196.jpg

WorkingTipsForXenVmExport

在XenServer虚机实例中,删除1中所示意的两个软件, 将C:\windows\system32\drivers\xen.sys删除或者转移目录:

/images/2024_02_22_19_15_34_749x514.jpg

从Xencenter导出镜像文件, 可通过XenCenter或者命令行导出,导出格式为xva格式.

编译xva-img:

$ git clone https://github.com/eriklax/xva-img.git
$ cd xva-img/
$ sudo apt install -y cmake libxxhash-dev
$ cmake .
$ make
$ sudo make install
$ which xva-img
/usr/local/bin/xva-img

解压xva文件并进行转换:

$ mkdir win10 && cd win10 && tar xf ../20240222XXXXXXXXXXX.xva
$ cd ..
$ xva-img -p disk-export win10/Ref\:21/ virtual_win10.raw

使用导出的virtual_win10.raw文件启动Qemu虚机, 注意磁盘类型为SATA:

/images/2024_02_22_19_22_08_481x314.jpg

WorkingTipsOnXenBasedMigration

1. 安装Xenserver

创建启动盘:

$ sudo dd if=./XenServer8_2024-01-09.iso of=/dev/sdb bs=1M && sudo sync

U盘启动工作站开始安装:

/images/2024_02_21_11_05_26_963x436.jpg

/images/2024_02_21_11_06_10_798x581.jpg

/images/2024_02_21_11_06_23_1226x540.jpg

/images/2024_02_21_11_06_34_1233x970.jpg

/images/2024_02_21_11_06_47_1163x512.jpg

/images/2024_02_21_11_07_11_1327x558.jpg

/images/2024_02_21_11_07_25_936x470.jpg

/images/2024_02_21_11_07_39_754x388.jpg

/images/2024_02_21_11_07_55_879x508.jpg

/images/2024_02_21_11_09_07_902x448.jpg

/images/2024_02_21_11_09_23_729x602.jpg

/images/2024_02_21_11_09_38_811x587.jpg

/images/2024_02_21_11_09_51_813x421.jpg

/images/2024_02_21_11_10_03_967x421.jpg

/images/2024_02_21_11_10_16_1199x236.jpg

/images/2024_02_21_11_11_55_1009x292.jpg

/images/2024_02_21_11_15_43_1003x458.jpg

登录:

[root@xenserver-tyy ~]# cat /etc/issue
XenServer 8

System Booted: 2024-02-21 11:16

Your XenServer host has now finished booting. 
To manage this server please use the XenCenter application. 
You can install XenCenter for Windows from https://www.xenserver.com/downloads.

You can connect to this system using one of the following network
addresses:

IP address not configured

[root@xenserver-tyy ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:           1706         110        1278           8         318        1534
Swap:          1023           0        1023
[root@xenserver-tyy ~]# 

XenCenter

/images/2024_02_21_11_20_15_538x406.jpg

/images/2024_02_21_11_20_25_613x458.jpg

/images/2024_02_21_11_20_36_489x383.jpg

/images/2024_02_21_11_20_50_505x385.jpg

/images/2024_02_21_11_21_12_1006x755.jpg

增加新xenserver:

/images/2024_02_21_11_21_35_591x305.jpg

/images/2024_02_21_11_21_52_652x291.jpg

添加的xenserver-tyy

/images/2024_02_21_11_22_10_989x712.jpg

import类型只支持:

/images/2024_02_21_11_32_27_429x125.jpg

所以需要转换为vmdk?

/images/2024_02_21_12_11_30_812x534.jpg

/images/2024_02_21_12_11_51_810x533.jpg

/images/2024_02_21_12_12_03_822x429.jpg

/images/2024_02_21_12_18_35_810x551.jpg

转换格式

tigervncworkingtips

Install LXQt for use with VNC:

$ sudo pacman -S lxqt

Enable the VNC configuration to use LXQt:

$ cat ~/.vnc/config
session=lxqt
geometry=1920x1080
alwaysshared

Enable the Linux user for the VNC session:

$ cat /etc/tigervnc/vncserver.users 
# TigerVNC User assignment
#
# This file assigns users to specific VNC display numbers.
# The syntax is <display>=<username>. E.g.:
#
# :2=andrew
# :3=lisa
 :1=dash

Now set up the VNC password with vncpasswd:

$ vncpasswd

Then you can start the VNC server via:

$ sudo systemctl start vncserver@:1
$ sudo systemctl enable vncserver@:1

WorkingTipsOnBuild6130tcKernel

Preparation

Definition for the vagrant build machine:

$ cat Vagrantfile 
Vagrant.configure("2") do |config|
  config.vm.box = "bento/ubuntu-22.04"
  config.disksize.size = '180GB'

config.vm.provider "virtualbox" do |v|
  v.memory = 65535
  v.cpus = 12
end

end
$ pwd
/home/dash/Code/vagrant/buildUbuntuKernel610tc

Extend the partition after login to the vm:

lvextend -l +100%FREE /dev/ubuntu-vg/ubuntu-lv 
resize2fs /dev/ubuntu-vg/ubuntu-lv

Update the system:

apt update -y && apt upgrade -y

Install necessary packages for building kernel:

sudo apt install libncurses-dev gawk flex bison openssl libssl-dev dkms libelf-dev libudev-dev libpci-dev libiberty-dev autoconf llvm
sudo apt build-dep linux linux-image-unsigned-6.1.0-1029-oem

Source Code

Enable all of the deb-src entries in /etc/apt/sources.list, then run sudo apt update -y.

mkdir -p ~/Code/Code6101029
cd ~/Code/Code6101029
apt source linux-image-unsigned-6.1.0-1029-oem

Build the packages:

cd /home/vagrant/Code/Code6101029/linux-oem-6.1-6.1.0
fakeroot debian/rules clean
fakeroot debian/rules binary

Check the build result:

$ ls
linux-buildinfo-6.1.0-1033-oem_6.1.0-1033.33_amd64.deb
linux-headers-6.1.0-1033-oem_6.1.0-1033.33_amd64.deb
linux-image-unsigned-6.1.0-1033-oem_6.1.0-1033.33_amd64.deb
linux-modules-6.1.0-1033-oem_6.1.0-1033.33_amd64.deb
linux-modules-ipu6-6.1.0-1033-oem_6.1.0-1033.33_amd64.deb
linux-modules-ivsc-6.1.0-1033-oem_6.1.0-1033.33_amd64.deb
linux-modules-iwlwifi-6.1.0-1033-oem_6.1.0-1033.33_amd64.deb
linux-oem-6.1-headers-6.1.0-1033_6.1.0-1033.33_all.deb
linux-oem-6.1-tools-6.1.0-1033_6.1.0-1033.33_amd64.deb
linux-oem-6.1-tools-host_6.1.0-1033.33_all.deb
linux-tools-6.1.0-1033-oem_6.1.0-1033.33_amd64.deb

Using docker for building

Initialize a docker instance for building:

sudo docker run -it -v /media/sdc/Code/buildkernel:/buildkernel ubuntu:22.04 /bin/bash

Enter the Docker container and run:

apt update -y
apt install -y vim
vim /etc/apt/sources.list
apt update -y
apt install libncurses-dev gawk flex bison openssl libssl-dev dkms libelf-dev libudev-dev libpci-dev libiberty-dev autoconf llvm debhelper rsync python3-docutils bc libcap-dev git build-essential  asciidoc cpio libjvmti-oprofile0 linux-tools-common default-jdk binutils-dev libbtf1 libdwarf-dev dwarf* pahole libdwarf1 libdwarf++0
# get the dependencies via "dpkg-buildpackage -b"
apt install -y makedumpfile libnewt-dev libdw-dev pkg-config libunwind8-dev liblzma-dev libaudit-dev uuid-dev libnuma-dev zstd fig2dev sharutils python3-dev python3-sphinx python3-sphinx-rtd-theme imagemagick graphviz dvipng fonts-noto-cjk latexmk librsvg2-bin
mkdir -p  /buildkernel/oem && cd /buildkernel/oem
apt source linux-image-unsigned-6.1.0-1029-oem
cd linux-oem-6.1
cd linux-oem-6.1-6.1.0/
time sh -c 'fakeroot debian/rules clean && fakeroot debian/rules binary'

Build time:

real	32m18.795s
user	309m36.938s
sys	43m52.711s

Customize kernel config file

Fetch the config file:

scp remote_config_files /root/config.common.ubuntu
cp /root/config.common.ubuntu linux-oem-6.1-6.1.0/debian.oem/config/

Edit the files:

# vim debian/rules.d/2-binary-arch.mk
......
	if [ -e $(commonconfdir)/config.common.ubuntu ]; then \
		//cat $(commonconfdir)/config.common.ubuntu $(archconfdir)/config.common.$(arch) $(archconfdir)/config.flavour.$(target_flavour) > $(builddir)/build-$*/.config; \
		cat /root/config.common.ubuntu > $(builddir)/build-$*/.config; \
	else \
......
# vim debian/rules.d/4-checks.mk
......
# Check the module list against the last release (always)
module-check-%: $(stampdir)/stamp-install-%
	@echo Debug: $@
	echo "done!"; 
	#$(DROOT)/scripts/checks/module-check "$*" \
	#	"$(prev_abidir)" "$(abidir)" $(do_skip_checks)


.......
config-prepare-check-%: $(stampdir)/stamp-prepare-tree-%
	@echo Debug: $@
	if [ -e $(commonconfdir)/config.common.ubuntu ]; then \
		echo "done!"; \
		#perl -f $(DROOT)/scripts/checks/config-check \
		#	$(builddir)/build-$*/.config "$(arch)" "$*" "$(commonconfdir)" \
		#	"$(skipconfig)" "$(do_enforce_all)"; \

......

Apply the patch:

$ patch -p1 < xxxx.patch

Then start the build.

tc diff for dkms

Create the diff files via:

# diff -x '.*' -Nur i915-sriov-dkms-6.1.73 i915-sriov-dkms-6.1.73.tc > dkms-kexec.patch

Apply the patch, then rebuild:

# dkms remove -m i915-sriov-dkms -v 6.1.73
# dkms install -m i915-sriov-dkms -v 6.1.73