DistCCBuildingInLXC

Create a profile named bridgeprofile; after editing it (see the steps below) it should look like this:

$ lxc profile create bridgeprofile
$ lxc profile show bridgeprofile
config: {}
description: Bridged networking LXD profile
devices:
  eth0:
    name: eth0
    nictype: bridged
    parent: br0
    type: nic
name: bridgeprofile

Steps (put the profile content into a file named bridge and apply it with lxc profile edit):

$ cat bridge
config: {}
description: Bridged networking LXD profile
devices:
  eth0:
    name: eth0
    nictype: bridged
    parent: br0
    type: nic
name: bridgeprofile
$ cat bridge | lxc profile edit bridgeprofile

Then launch a container named distcc with both the default and bridge profiles:

$ lxc launch -p default -p bridgeprofile ubuntu:22.04 distcc
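
Optionally confirm the container picked up the bridged NIC from the profile (output depends on your bridge and DHCP setup):

$ lxc config show distcc --expanded | grep -A5 eth0
$ lxc list distcc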

Edit the netplan configuration inside the LXC instance (distcc) to give it a static address:

root@distcc:~# cat /etc/netplan/50-cloud-init.yaml 
network:
    version: 2
    ethernets:
        eth0:
            dhcp4: false
            addresses: [192.168.1.9/24]
            gateway4: 192.168.1.33
root@distcc:~# netplan apply
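
A quick check from inside the container that the static address and gateway work (addresses follow the example above):

root@distcc:~# ip addr show eth0
root@distcc:~# ping -c1 192.168.1.33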

Install distcc via:

apt install -y distcc build-essential

Configure distcc in /etc/default/distcc:

STARTDISTCC="true"
ALLOWEDNETS="192.168.1.0/24"
LISTENER="0.0.0.0"
JOBS="12"
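
With /etc/default/distcc in place, restart the daemon and point a client at it; a minimal sketch, assuming the server keeps the 192.168.1.9 address from the netplan example and the service name is distcc:

# on the distcc server (the container)
sudo systemctl restart distcc
# on a build client inside 192.168.1.0/24
export DISTCC_HOSTS="192.168.1.9 localhost"
make -j12 CC="distcc gcc" CXX="distcc g++"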

Buildlibvirt8OnUbuntu1804

Build steps for libvirt 8.0.0 on Ubuntu 18.04, taken from shell history; each failed meson run shows the next missing dependency:

  275  tar xJvf libvirt-8.0.0.tar.xz 
  276  cd libvirt-8.0.0/
  277  ls
  278  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  279  apt-cache search xsltproc
  280  sudo apt install -y xsltproc
  281  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  282  apt-cache search rst2html
  283  apt-cache search rst
  284  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  285  sudo apt-get install python3-docutils
  286  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  287  apt-cache search libtirpc
  288  sudo apt install -y libtirpc-dev
  289  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  290  apt-cache search gnutls
  291  sudo apt install -y libgnutls28-dev
  292  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  293  apt-cache search libxml-2.0
  294  apt-cache search libxml
  295  sudo apt install -y libxml2-dev
  296  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  297  apt-cache search pciaccess
  298  sudo apt install -y libpciaccess-dev
  299  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  300  apt-cache search YAJL
  301  sudo apt install -y libyajl-dev
  302  meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
  303  history
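
Condensed, the trial-and-error above boils down to installing the missing packages up front and configuring once; the compile/install step is not in the captured history, but with meson it would be ninja, so a sketch assuming no other dependencies are missing on a clean 18.04 box:

sudo apt install -y xsltproc python3-docutils libtirpc-dev libgnutls28-dev \
    libxml2-dev libpciaccess-dev libyajl-dev
meson build -Dsystem=true -Ddriver_qemu=enabled -Ddriver_interface=enabled \
    -Ddriver_libvirtd=enabled -Ddriver_remote=enabled -Ddriver_network=enabled --prefix=/usr
ninja -C build
sudo ninja -C build install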

WorkingTipsOnRongHe

Tips

crontab entry:

@reboot /usr/bin/execpipe.sh

execpipe.sh content:

$ cat /usr/bin/execpipe.sh
#!/bin/bash
# Block on the named pipe, eval whatever command line is written into it,
# and redirect the command's output to /mypipeoutput.txt.
while true; do eval "$(cat /mypipe)" &> /mypipeoutput.txt; done
#while true; do eval "$(cat /mypipe)"; done

The pipe itself is created beforehand with mkfifo /mypipe; verify it exists via:

$ ls / | grep mypipe
mypipe
mypipeoutput.txt
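
With the pipe and the @reboot script in place, any command written into the pipe is executed (as root, via the cron job) and its output lands in /mypipeoutput.txt; for example:

$ echo "uptime" > /mypipe
$ cat /mypipeoutput.txt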

Kernel Building (VB)

Build the kernel via:

apt install -y git fakeroot build-essential ncurses-dev xz-utils libssl-dev bc flex libelf-dev bison rsync kmod cpio unzip
unzip kernel-config.zip
cp kernel-config/x86_64_defconfig .config
./scripts/config --disable DEBUG_INFO
echo "" | make ARCH=x86_64 olddefconfig
make ARCH=x86_64 -j16 LOCALVERSION=-lts2021-iotg  bindeb-pkg
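
bindeb-pkg drops the resulting .deb packages one directory above the kernel tree; installing them is then a plain dpkg call (exact file names depend on the kernel version, hence the wildcards):

cd ..
sudo dpkg -i linux-image-*lts2021-iotg*.deb linux-headers-*lts2021-iotg*.deb
sudo reboot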

Kernel patch backport:

drivers/gpu/drm/i915/display/intel_fbc.c, line 1029, does not match tc's implementation

/drivers/gpu/drm/i915# vim i915_driver.c, there are substantial differences

OnBuildxpumanager

Environment

A Vagrant VM; from inside this VM we can reach the internet (through the GFW).

Steps

Get the source code and prepare the code changes:

# apt install -y git build-essential
# git clone  https://github.com/intel/xpumanager.git
# cd xpumanager
# vim ./core/src/vgpu/precheck.cpp +72    
    } else if (cmdRes.output().find("vmx") != std::string::npos) {
        /*
        *   VMX flag detected by lscpu
        */
        result->vmxFlag = true;
    } else {
        result->vmxFlag = true;
        //result->vmxFlag = false;
        //std::string msg = "No VMX flag, Please ensure Intel VT enabled in BIOS";
        //strncpy(result->vmxMessage, msg.c_str(), msg.size() + 1);
    }
# vim builder/Dockerfile.builder-ubuntu
    make -j && make install && \
---->
    make -j8 && make install && \

Build the builder Docker image (here BASE_VERSION=22.04) and save the image ID to an iidfile:

$ sudo docker build --build-arg BASE_VERSION=$BASE_VERSION --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy --iidfile /tmp/xpum_builder_ubuntu_$BASE_VERSION.iid -f builder/Dockerfile.builder-ubuntu .
$ sudo docker images
REPOSITORY   TAG       IMAGE ID       CREATED         SIZE
<none>       <none>    8beea6fc722f   9 minutes ago   1.92GB
ubuntu       22.04     b6548eacb063   11 days ago     77.8MB
$ cp /tmp/xpum_builder_ubuntu_22.04.iid ~

Use this Docker image to build the deb package (xpumanager):

sudo docker run --rm \
    -v $PWD:$PWD \
    -u $UID \
    -e CCACHE_DIR=$PWD/.ccache \
    -e CCACHE_BASEDIR=$PWD \
    $(cat /tmp/xpum_builder_ubuntu_$BASE_VERSION.iid) $PWD/build.sh
cp /home/vagrant/xpumanager/build/xpumanager_1.2.25_20231213.023315.251edc28~u22.04_amd64.deb ~

Build xpu-smi (the daemonless variant):

rm -fr build
sudo docker run --rm \
    -v $PWD:$PWD \
    -u $UID \
    -e CCACHE_DIR=$PWD/.ccache \
    -e CCACHE_BASEDIR=$PWD \
    $(cat /tmp/xpum_builder_ubuntu_$BASE_VERSION.iid) $PWD/build.sh -DDAEMONLESS=ON
cp /home/vagrant/xpumanager/build/xpu-smi_1.2.25_20231213.023748.251edc28~u22.04_amd64.deb ~

Verification

Install:

# sudo apt-get install -y ./xpu-smi_1.2.25_20231213.023748.251edc28~u22.04_amd64.deb
# ls /dev/dri/
by-path  card0  card1  renderD128  renderD129
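
As a further sanity check, query the devices with the freshly installed tool (xpu-smi discovery lists the GPUs it detects):

# xpu-smi discovery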

WorkingTipsOnDeployment

1. Partition Preparation

Shrink the home logical volume (back up /home first) and give the freed space to root:

tar -czvf /root/home.tgz -C /home .
tar -tvf /root/home.tgz
umount /dev/mapper/centos-home
lvremove /dev/mapper/centos-home
lvcreate -L 40GB -n home centos
mkfs.xfs /dev/centos/home
mount /dev/mapper/centos-home
lvextend -r -l +100%FREE /dev/mapper/centos-root
tar -xzvf /root/home.tgz -C /home
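
Confirm the space moved as expected:

lvs centos
df -h / /home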

Create a GPT partition on each NVMe disk, interactively with gdisk (a scripted sgdisk alternative is sketched after the key sequence below):

gdisk /dev/nvme0n1
gdisk /dev/nvme1n1
gdisk /dev/nvme2n1
gdisk /dev/nvme3n1

o Enter for new empty GUID partition table (GPT)
y Enter to confirm your decision
n Enter for new partition
Enter for default of first partition
Enter for default of the first sector
Enter for default of the last sector
fd00 Enter for Linux RAID type
w Enter to write changes
y Enter to confirm your decision
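
The same layout can be scripted with sgdisk instead of answering gdisk prompts; a sketch for one disk (repeat per device):

sgdisk -o -n 1:0:0 -t 1:fd00 /dev/nvme0n1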

Create the RAID1 arrays using mdadm:

mdadm --create /dev/md0 --level=1 --raid-devices=2 /dev/nvme0n1p1 /dev/nvme1n1p1
mdadm --create /dev/md1 --level=1 --raid-devices=2 /dev/nvme2n1p1 /dev/nvme3n1p1
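
Initial RAID1 resync on 7T devices takes a while; progress can be watched in /proc/mdstat:

cat /proc/mdstat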

Examine the partitions via:

lsblk

......
nvme2n1         259:2    0     7T  0 disk  
└─nvme2n1p1     259:5    0     7T  0 part  
  └─md1           9:1    0     7T  0 raid1 
nvme1n1         259:1    0     7T  0 disk  
└─nvme1n1p1     259:4    0     7T  0 part  
  └─md0           9:0    0     7T  0 raid1 
......

Create the PVs:

pvcreate /dev/md0
pvcreate /dev/md1

Create the VG and extend it with the second PV:

# vgcreate vmvolume /dev/md0
  Volume group "vmvolume" successfully created
# vgextend vmvolume /dev/md1
  Volume group "vmvolume" successfully extended
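
pvs and vgs should now show both mirrors inside vmvolume before any logical volumes are carved out:

pvs
vgs vmvolume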

2. Create the