8 Commits

Author SHA1 Message Date
Roman Dobosz
e381c0ad1b Install apparmor tools also for Ubuntu Focal.
The k8s gate is still on focal, so the patch that unblocks apparmor for
jammy does not affect it. Here is the fix for focal as well.

Change-Id: I2a9bc69a59e7d6d21d61e79115d5a3c726c73ab0
(cherry picked from commit bdc0b49ce3)
2023-03-13 16:37:36 +01:00
yangjianfeng
3971293629 Support config image repository for kubeadm
In some places where the network environment is limited, kubeadm
can't pull images from k8s.gcr.io. This patch adds a variable
`KUBEADMIN_IMAGE_REPOSITORY` so that developers located in
these places can configure kubeadm to pull container images from a
repository that they can access.

Change-Id: I14aed50077ef0760635e575770fd2274cb759c53
(cherry picked from commit 90b4089cda)
2022-03-22 04:41:52 +00:00
4c9398b1c9 Update .gitreview for stable/xena
Change-Id: I5ee92432b7faef502dad85972731a57a38c407ef
2021-09-24 14:57:13 +00:00
Roman Dobosz
d4de1bb990 Change repos from projectatomic to kubic OBS project.
Since projectatomic Ubuntu builds are deprecated, and advice was to
consult upstream documentation[1], Kubernetes with cri-o now rely on
Kubic project, which (among the others) provides packages for Ubuntu
20.04. Let us switch for those.

[1] https://kubernetes.io/docs/setup/production-environment/container-runtimes/#cri-o

Change-Id: Ib06753d22f8859eefedc031094851b052f4105b6
2021-01-25 13:32:40 +01:00
Ghanshyam Mann
74bf39e6a6 Migrate devstack-plugin-container jobs to focal
As per victoria cycle testing runtime and community goal[1]
we need to migrate upstream CI/CD to Ubuntu Focal(20.04).

Tempest-based jobs will be migrated automatically once the devstack
base job starts running on Focal (Depends-On). This commit migrates
the devstack-plugin-container job to run on focal.

Depends-On: https://review.opendev.org/#/c/734700

[1] https://governance.openstack.org/tc/goals/selected/victoria/migrate-ci-cd-jobs-to-ubuntu-focal.html

Change-Id: I1a3ac070027805691fc1007458ac02567f847ae9
2020-09-13 04:05:37 +00:00
Hongbin Lu
9620216b35 Tolerate non-existence of cni config file
Change-Id: I761bf9344651ec196471ca57bf0b29184a69e161
2020-05-05 01:26:18 +00:00
Zuul
f5983f3c02 Merge "Configure kata runtime for containerd" 2020-05-01 00:14:28 +00:00
Hongbin Lu
dc944062c3 Configure kata runtime for containerd
Change-Id: I9d9d223effcaa94d0b1b25210a24aaa313353f05
2020-04-12 00:27:23 +00:00
7 changed files with 49 additions and 26 deletions

View File

@@ -2,3 +2,4 @@
host=review.opendev.org
port=29418
project=openstack/devstack-plugin-container.git
defaultbranch=stable/xena

View File

@@ -18,7 +18,7 @@
- job:
name: devstack-plugin-container-k8s
parent: devstack-minimal
nodeset: openstack-two-node-bionic
nodeset: openstack-two-node-focal
pre-run: playbooks/devstack-plugin-container-k8s/pre.yaml
run: playbooks/devstack-plugin-container-k8s/run.yaml
post-run: playbooks/devstack-plugin-container-k8s/post.yaml

View File

@@ -78,7 +78,7 @@ function configure_cni_plugins {
for plugin in ${CNI_PLUGINS_INSTALL_PLUGINS//,/ }; do
local source_config_file
source_config_file=$(ls ${CNI_PLUGINS_CONF_SOURCE_DIR}/*${plugin}.conf 2> /dev/null)
source_config_file=$(ls ${CNI_PLUGINS_CONF_SOURCE_DIR}/*${plugin}.conf 2> /dev/null || true)
if [ $source_config_file ]; then
echo "Found config file for plugin: $plugin"
sudo install -o "$STACK_USER" -m 0664 -t "$CNI_PLUGINS_CONF_DIR" -D \

View File

@@ -40,14 +40,22 @@ function install_crio {
local lsb_dist=${os_VENDOR,,}
local dist_version=${os_CODENAME}
local arch
arch=$(dpkg --print-architecture)
local kubic_obs_project_key="2472d6d0d2f66af87aba8da34d64390375060aa4"
local os="x${os_VENDOR}_${os_RELEASE}"
if is_ubuntu; then
apt_get install apt-transport-https ca-certificates software-properties-common
sudo add-apt-repository -y ppa:projectatomic/ppa
apt_get install apt-transport-https ca-certificates \
software-properties-common
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
--recv ${kubic_obs_project_key}
sudo apt-add-repository "deb https://download.opensuse.org/"`
`"repositories/devel:/kubic:/libcontainers:/stable/${os}/ /"
sudo apt-add-repository "deb http://download.opensuse.org/"`
`"repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/"`
`"${CRIO_VERSION}/${os}/ /"
# Installing podman and containerd will get us compatible versions of
# cri-o and runc. And we need podman to manage container images anyway.
apt_get install podman buildah
apt_get install podman buildah cri-o-runc cri-o
elif is_fedora; then
if [[ "$lsb_dist" = "centos" ]]; then
sudo yum-config-manager \
@@ -75,16 +83,6 @@ function configure_crio {
iniset -sudo ${crio_conf} crio.runtime log_level \"info\"
fi
if is_ubuntu; then
# In Ubuntu's a special vendored version of runc is installed with
# cri-o. This means that it'll not work with the system's version of
# runc. Moreover vendored runc is not placed into /usr/bin, where
# crio.conf states that it will be. We fix that by linking the vendored
# binary to /usr/bin.
if [[ ! -e /usr/bin/runc ]]; then
sudo ln -s /usr/lib/cri-o-runc/sbin/runc /usr/bin/runc
sudo chmod +x /usr/bin/runc
fi
# At least for 18.04 we need to set up /etc/containers/registries.conf
# with some initial content. That's another bug with that PPA.
local registries_conf

View File

@@ -33,6 +33,7 @@ ENABLE_CONTAINERD_CRI=$(trueorfalse False ENABLE_CONTAINERD_CRI)
ENABLE_LIVE_RESTORE=$(trueorfalse False ENABLE_LIVE_RESTORE)
ENABLE_IPV6=$(trueorfalse False ENABLE_IPV6)
KATA_BRANCH=${KATA_BRANCH:-master}
KATA_RUNTIME=${KATA_RUNTIME:-kata-runtime}
CONTAINERD_CONF_DIR=/etc/containerd
CONTAINERD_CONF=$CONTAINERD_CONF_DIR/config.toml
@@ -58,6 +59,7 @@ function install_docker {
local arch
arch=$(dpkg --print-architecture)
if is_ubuntu; then
apt_get install apparmor
if [[ ${dist_version} == 'trusty' ]]; then
if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then
apt_get install linux-image-extra-$(uname -r) linux-image-extra-virtual
@@ -144,11 +146,11 @@ function configure_docker {
if [[ "$ENABLE_KATA_CONTAINERS" == "True" ]]; then
if sudo grep -E 'svm|vmx' /proc/cpuinfo &> /dev/null; then
runtime_opts+="\"runtimes\": {
\"kata-runtime\": {
\"$KATA_RUNTIME\": {
\"path\": \"/usr/bin/kata-runtime\"
}
},
\"default-runtime\": \"kata-runtime\","
\"default-runtime\": \"$KATA_RUNTIME\","
fi
# TODO(hongbin): deprecate and remove clear container
elif [[ "$ENABLE_CLEAR_CONTAINER" == "True" ]]; then
@@ -223,10 +225,25 @@ function configure_containerd {
sudo mkdir -p $CONTAINERD_CONF_DIR
sudo chown -R $STACK_USER $CONTAINERD_CONF_DIR
containerd config default > $CONTAINERD_CONF
stack_user_gid=$(getent group $STACK_USER | cut -d: -f3)
sed -i "s/gid = [0-9]*/gid = ${stack_user_gid}/" $CONTAINERD_CONF
sed -i "s/level = \"\"/level = \"debug\"/" $CONTAINERD_CONF
cat <<EOF | sudo tee $CONTAINERD_CONF >/dev/null
[grpc]
gid = $stack_user_gid
[debug]
level = "debug"
EOF
if [[ "$ENABLE_KATA_CONTAINERS" == "True" ]]; then
cat <<EOF | sudo tee -a $CONTAINERD_CONF >/dev/null
[plugins]
[plugins.cri]
[plugins.cri.containerd]
[plugins.cri.containerd.runtimes.${KATA_RUNTIME}]
runtime_type = "io.containerd.kata.v2"
EOF
fi
sudo systemctl --no-block restart containerd.service
}
@@ -236,6 +253,7 @@ function stop_docker {
function cleanup_docker {
uninstall_package docker-ce
rm -f $CONTAINERD_CONF
}
# TODO(hongbin): deprecate and remove clear container

View File

@@ -27,7 +27,7 @@ K8S_NODE_IP=${K8S_NODE_IP:-$HOST_IP}
K8S_API_SERVER_PORT=${K8S_API_SERVER_PORT:-6443}
K8S_POD_NETWORK_CIDR=${K8S_POD_NETWORK_CIDR:-10.244.0.0/16}
K8S_SERVICE_NETWORK_CIDR=${K8S_SERVICE_NETWORK_CIDR:-10.96.0.0/12}
K8S_VERSION=${K8S_VERSION:-1.14.1-00}
K8S_VERSION=${K8S_VERSION:-1.19.0-00}
K8S_NETWORK_ADDON=${K8S_NETWORK_ADDON:-flannel}
# Functions
@@ -60,9 +60,10 @@ function install_kubeadm {
function kubeadm_init {
local kubeadm_config_file
kubeadm_config_file=$(mktemp)
cat <<EOF | sudo tee $kubeadm_config_file >/dev/null
cat <<EOF | tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
imageRepository: "${KUBEADMIN_IMAGE_REPOSITORY}"
etcd:
external:
endpoints:
@@ -84,6 +85,7 @@ apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
EOF
sudo kubeadm config images pull --image-repository=${KUBEADMIN_IMAGE_REPOSITORY}
sudo kubeadm init --config $kubeadm_config_file --ignore-preflight-errors Swap
local kube_config_file=$HOME/.kube/config
@@ -92,14 +94,14 @@ EOF
safe_chown $STACK_USER:$STACK_USER $kube_config_file
if [[ "$K8S_NETWORK_ADDON" == "flannel" ]]; then
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/4ff77dc7c35851913587f7daccf25d754e77aa65/Documentation/kube-flannel.yml
fi
}
function kubeadm_join {
local kubeadm_config_file
kubeadm_config_file=$(mktemp)
cat <<EOF | sudo tee $kubeadm_config_file >/dev/null
cat <<EOF | tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:

View File

@@ -9,6 +9,7 @@ ENABLE_LIVE_RESTORE=${ENABLE_LIVE_RESTORE:-false}
ENABLE_IPV6=${ENABLE_IPV6:-false}
K8S_NETWORK_ADDON=${K8S_NETWORK_ADDON:-flannel}
ENABLE_CONTAINERD_CRI=${ENABLE_CONTAINERD_CRI:-false}
CRIO_VERSION=${CRIO_VERSION:-"1.18:/1.18.0"}
# Enable container services
enable_service container
@@ -20,3 +21,6 @@ if [[ ,${ENABLED_SERVICES} =~ ,"k8s-master" ]]; then
enable_service kube-scheduler
enable_service kube-proxy
fi
# Customize kubeadm container images repository
KUBEADMIN_IMAGE_REPOSITORY=${KUBEADMIN_IMAGE_REPOSITORY:-"k8s.gcr.io"}