3 Commits

Author SHA1 Message Date
Roman Dobosz
e381c0ad1b Install apparmor tools also for Ubuntu Focal.
The k8s gate is still on Focal, so the patch that unblocks AppArmor for
Jammy does not affect it. Here is the fix for Focal as well.

Change-Id: I2a9bc69a59e7d6d21d61e79115d5a3c726c73ab0
(cherry picked from commit bdc0b49ce3)
2023-03-13 16:37:36 +01:00
yangjianfeng
3971293629 Support config image repository for kubeadm
In some places where the network environment is limited, kubeadm
can't pull images from k8s.gcr.io. This patch adds a variable
`KUBEADMIN_IMAGE_REPOSITORY` so that developers located in these
places can configure kubeadm to pull container images from a
repository that they can access.

Change-Id: I14aed50077ef0760635e575770fd2274cb759c53
(cherry picked from commit 90b4089cda)
2022-03-22 04:41:52 +00:00
4c9398b1c9 Update .gitreview for stable/xena
Change-Id: I5ee92432b7faef502dad85972731a57a38c407ef
2021-09-24 14:57:13 +00:00
7 changed files with 26 additions and 193 deletions

View File

@@ -2,4 +2,4 @@
host=review.opendev.org
port=29418
project=openstack/devstack-plugin-container.git
defaultbranch=stable/2024.1
defaultbranch=stable/xena

View File

@@ -7,6 +7,7 @@
timeout: 4200
required-projects:
- openstack/devstack
- openstack/devstack-gate
- openstack/devstack-plugin-container
vars:
devstack_localrc:
@@ -24,6 +25,7 @@
timeout: 7200
required-projects:
- openstack/devstack
- openstack/devstack-gate
- openstack/devstack-plugin-container
vars:
devstack_services:

View File

@@ -20,7 +20,6 @@ set +o xtrace
# --------
CRIO_ENGINE_SOCKET_FILE=${CRIO_ENGINE_SOCKET_FILE:-/var/run/crio/crio.sock}
CRIO_ALLOW_ICMP=$(trueorfalse True CRIO_ALLOW_ICMP)
# Functions
# ---------
@@ -48,16 +47,15 @@ function install_crio {
software-properties-common
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
--recv ${kubic_obs_project_key}
sudo apt-add-repository -y "deb https://download.opensuse.org/"`
sudo apt-add-repository "deb https://download.opensuse.org/"`
`"repositories/devel:/kubic:/libcontainers:/stable/${os}/ /"
sudo apt-add-repository -y "deb http://download.opensuse.org/"`
sudo apt-add-repository "deb http://download.opensuse.org/"`
`"repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/"`
`"${CRIO_VERSION}/${os}/ /"
# Installing podman and containerd will get us compatible versions of
# cri-o and runc. And we need podman to manage container images anyway.
apt_get install podman buildah cri-o-runc cri-o
sudo systemctl enable crio
elif is_fedora; then
if [[ "$lsb_dist" = "centos" ]]; then
sudo yum-config-manager \
@@ -67,18 +65,6 @@ function install_crio {
--add-repo \
https://cbs.centos.org/repos/paas7-crio-311-candidate/x86_64/os/
fi
if [[ "${os_VENDOR}" == *'Stream' ]]; then
local stream="_Stream"
fi
# NOTE: All crio versions are not supported for Centos 8 stream
# because crio rpm is not present for some minor versions
sudo yum-config-manager \
--add-repo \
"https://download.opensuse.org/repositories/"`
`"devel:/kubic:/libcontainers:/stable:/cri-o:/${CRIO_VERSION}/"`
`"CentOS_${os_RELEASE}${stream}/"`
`"devel:kubic:libcontainers:stable:cri-o:${CRIO_VERSION}.repo"
yum_install cri-o podman buildah
fi
}
@@ -87,85 +73,36 @@ function configure_crio {
# After an ./unstack it will be stopped. So it is ok if it returns exit-code == 1
sudo systemctl stop crio.service || true
export CRIO_CONF="/etc/crio/crio.conf"
local crio_conf
crio_conf=/etc/crio/crio.conf
# We're wrapping values in \"<val>\" because that's the format cri-o wants.
iniset -sudo ${CRIO_CONF} crio.api listen \"${CRIO_ENGINE_SOCKET_FILE}\"
iniset -sudo ${CRIO_CONF} crio.image pause_image \"${CRIO_PAUSE_IMAGE}\"
iniset -sudo ${CRIO_CONF} crio.image pause_command \"${CRIO_PAUSE_COMMAND}\"
iniset -sudo ${crio_conf} crio.api listen \"${CRIO_ENGINE_SOCKET_FILE}\"
if [[ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]]; then
# debug is way too verbose, info will be enough
iniset -sudo ${CRIO_CONF} crio.runtime log_level \"info\"
iniset -sudo ${crio_conf} crio.runtime log_level \"info\"
fi
if is_ubuntu; then
local crio_minor=${CRIO_VERSION#*.}
# At least for 18.04 we need to set up /etc/containers/registries.conf
# with some initial content. That's another bug with that PPA.
local registries_conf
registries_conf="/etc/containers/registries.conf"
if [[ ! -f ${registries_conf} && $crio_minor -lt 24 ]]; then
if [[ ! -f ${registries_conf} ]]; then
sudo mkdir -p `dirname ${registries_conf}`
cat << EOF | sudo tee ${registries_conf}
[registries.search]
registries = ['docker.io']
EOF
else
# If there is a config file, that means, we are probably on the
# newer version of crio/container/podman, which basically means
# we cannot mix [registries.search] registries filled with
# something and unqualified-search-registries setting which appear
# on sysregistry v2 config syntax. And because it's a TOML now, we
# cannot rely on iniset, but directly change the file.
local rname='unqualified-search-registries'
local rval='["docker.io", "quay.io"]'
if [[ ! -f ${registries_conf} ]]; then
cat << EOF | sudo tee ${registries_conf}
unqualified-search-registries = ["docker.io", "quay.io"]
EOF
elif grep -wq "^${rname}" "${registries_conf}"; then
sudo sed -i -e \
"s/^${rname}.*$/${rname} = ${rval}/" "${registries_conf}"
else
sudo sed -i "1s/^/${rname} = ${rval}\n/" "${registries_conf}"
fi
fi
# CRI-O from kubic repo have placed runc in different place, not even
# in path, just to not conflict with runc package from official repo.
# We need to change it.
iniset -sudo ${CRIO_CONF} crio.runtime.runtimes.runc runtime_path \
\"/usr/lib/cri-o-runc/sbin/runc\"
if [ -n "${CNI_CONF_DIR}" ]; then
iniset -sudo ${CRIO_CONF} crio.network network_dir \
\"${CNI_CONF_DIR}\"
fi
if [ -n "${CNI_PLUGIN_DIR}" ]; then
iniset -sudo ${CRIO_CONF} crio.network plugin_dir \
\"${CNI_PLUGIN_DIR}\"
fi
# By default CRI-O doesn't allow ICMP between containers, although it
# is ususally expected for testing purposes.
if [ "${CRIO_ALLOW_ICMP}" == "True" ]; then
if grep -wq '^default_sysctls' ${CRIO_CONF}; then
export CRIO_KEY="default_sysctls"
export CRIO_VAL='[ "net.ipv4.ping_group_range=0 2147483647", ]'
_update_config
else
iniset -sudo ${CRIO_CONF} crio.runtime default_sysctls \
'[ "net.ipv4.ping_group_range=0 2147483647", ]'
fi
fi
elif is_fedora; then
local lsb_dist=${os_VENDOR,,}
if [[ "$lsb_dist" = "centos" ]]; then
# CentOS packages are putting runc binary in different place...
iniset -sudo ${CRIO_CONF} crio.runtime runtime \"/usr/sbin/runc\"
iniset -sudo ${crio_conf} crio.runtime runtime \"/usr/sbin/runc\"
# CentOS version seems to only work with cgroupfs...
iniset -sudo ${CRIO_CONF} crio.runtime cgroup_manager \"cgroupfs\"
iniset -sudo ${crio_conf} crio.runtime cgroup_manager \"cgroupfs\"
fi
fi
@@ -176,46 +113,5 @@ function stop_crio {
sudo systemctl stop crio.service || true
}
function _update_config {
sudo -E python3 - <<EOF
"""
Update provided by CRIO_KEY key list in crio configuration in a form of:
some_key = [ some,
value
]
or just an empty list:
some_key = [
]
with the CRIO_VAL value.
Note, CRIO_VAL must include square brackets.
"""
import os
import re
crio_key = os.environ.get('CRIO_KEY')
crio_val = os.environ.get('CRIO_VAL')
crio_conf = os.environ.get('CRIO_CONF')
pat = re.compile(rf'{crio_key}\s*=\s*\[[^\]]*\]', flags=re.S | re.M)
with open(crio_conf) as fobj:
conf = fobj.read()
with open(crio_conf, 'w') as fobj:
search = pat.search(conf)
if search:
start, end = search.span()
conf = conf[:start] + f'{crio_key} = {crio_val}' + conf[end:]
fobj.write(conf)
EOF
}
# Restore xtrace
$_XTRACE_DOCKER

View File

@@ -24,8 +24,7 @@ set +o xtrace
DOCKER_ENGINE_SOCKET_FILE=${DOCKER_ENGINE_SOCKET_FILE:-/var/run/docker.sock}
DOCKER_ENGINE_PORT=${DOCKER_ENGINE_PORT:-2375}
DOCKER_CLUSTER_STORE=${DOCKER_CLUSTER_STORE:-}
STACK_GROUP="$( id --group --name "$STACK_USER" )"
DOCKER_GROUP=${DOCKER_GROUP:-$STACK_GROUP}
DOCKER_GROUP=${DOCKER_GROUP:-$STACK_USER}
DOCKER_CGROUP_DRIVER=${DOCKER_CGROUP_DRIVER:-}
# TODO(hongbin): deprecate and remove clear container
ENABLE_CLEAR_CONTAINER=$(trueorfalse False ENABLE_CLEAR_CONTAINER)
@@ -57,10 +56,8 @@ function install_docker {
local lsb_dist=${os_VENDOR,,}
local dist_version=${os_CODENAME}
if [[ "$lsb_dist" != "centosstream" ]]; then
local arch
arch=$(dpkg --print-architecture)
fi
local arch
arch=$(dpkg --print-architecture)
if is_ubuntu; then
apt_get install apparmor
if [[ ${dist_version} == 'trusty' ]]; then
@@ -77,27 +74,12 @@ function install_docker {
${dist_version} \
stable"
REPOS_UPDATED=False apt_get_update
if [ -n "${UBUNTU_DOCKER_VERSION}" ]; then
apt_get install docker-ce=$UBUNTU_DOCKER_VERSION
else
apt_get install docker-ce
fi
apt_get install docker-ce
elif is_fedora; then
if [[ "$lsb_dist" = "centos" ]]; then
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
elif [[ "$lsb_dist" = "centosstream" ]]; then
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
sudo yum-config-manager \
--add-repo \
https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 #noqa
sudo yum-config-manager \
--enable \
packages.cloud.google.com_yum_repos_kubernetes-el7-x86_64
sudo dnf -y install kubeadm --nogpgcheck
elif [[ "$lsb_dist" = "fedora" ]]; then
sudo dnf config-manager \
--add-repo \
@@ -236,7 +218,7 @@ ExecStart=/usr/bin/dockerd --config-file=$docker_config_file
Environment="HTTP_PROXY=$http_proxy" "HTTPS_PROXY=$https_proxy" "NO_PROXY=$no_proxy"
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker.service
sudo systemctl --no-block restart docker.service
}
function configure_containerd {

View File

@@ -27,7 +27,7 @@ K8S_NODE_IP=${K8S_NODE_IP:-$HOST_IP}
K8S_API_SERVER_PORT=${K8S_API_SERVER_PORT:-6443}
K8S_POD_NETWORK_CIDR=${K8S_POD_NETWORK_CIDR:-10.244.0.0/16}
K8S_SERVICE_NETWORK_CIDR=${K8S_SERVICE_NETWORK_CIDR:-10.96.0.0/12}
K8S_VERSION=${K8S_VERSION:-1.23.16-00}
K8S_VERSION=${K8S_VERSION:-1.19.0-00}
K8S_NETWORK_ADDON=${K8S_NETWORK_ADDON:-flannel}
# Functions
@@ -60,18 +60,8 @@ function install_kubeadm {
function kubeadm_init {
local kubeadm_config_file
kubeadm_config_file=$(mktemp)
if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
CGROUP_DRIVER=$(iniget "/etc/crio/crio.conf" crio.runtime cgroup_manager)
CRI_SOCKET="unix:///var/run/crio/crio.sock"
else
# docker is used
CGROUP_DRIVER=$(docker info -f '{{.CgroupDriver}}')
CRI_SOCKET="/var/run/dockershim.sock"
fi
cat <<EOF | tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta2
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
imageRepository: "${KUBEADMIN_IMAGE_REPOSITORY}"
etcd:
@@ -82,7 +72,7 @@ networking:
podSubnet: "${K8S_POD_NETWORK_CIDR}"
serviceSubnet: "${K8S_SERVICE_NETWORK_CIDR}"
---
apiVersion: kubeadm.k8s.io/v1beta2
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- token: "${K8S_TOKEN}"
ttl: 0s
@@ -90,19 +80,10 @@ kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: "${K8S_API_SERVER_IP}"
bindPort: ${K8S_API_SERVER_PORT}
nodeRegistration:
criSocket: "$CRI_SOCKET"
kubeletExtraArgs:
enable-server: "true"
taints:
[]
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
address: "0.0.0.0"
enableServer: true
cgroupDriver: $CGROUP_DRIVER
EOF
sudo kubeadm config images pull --image-repository=${KUBEADMIN_IMAGE_REPOSITORY}
sudo kubeadm init --config $kubeadm_config_file --ignore-preflight-errors Swap
@@ -113,25 +94,15 @@ EOF
safe_chown $STACK_USER:$STACK_USER $kube_config_file
if [[ "$K8S_NETWORK_ADDON" == "flannel" ]]; then
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/4ff77dc7c35851913587f7daccf25d754e77aa65/Documentation/kube-flannel.yml
fi
}
function kubeadm_join {
local kubeadm_config_file
kubeadm_config_file=$(mktemp)
if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
CGROUP_DRIVER=$(iniget "/etc/crio/crio.conf" crio.runtime cgroup_manager)
CRI_SOCKET="unix:///var/run/crio/crio.sock"
else
# docker is used
CGROUP_DRIVER=$(docker info -f '{{.CgroupDriver}}')
CRI_SOCKET="/var/run/dockershim.sock"
fi
cat <<EOF | tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta2
apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
bootstrapToken:
@@ -139,19 +110,10 @@ discovery:
token: "${K8S_TOKEN}"
unsafeSkipCAVerification: true
tlsBootstrapToken: "${K8S_TOKEN}"
nodeRegistration:
criSocket: "$CRI_SOCKET"
kubeletExtraArgs:
enable-server: "true"
taints:
[]
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
address: "0.0.0.0"
enableServer: true
cgroupDriver: $CGROUP_DRIVER
EOF
sudo kubeadm join --config $kubeadm_config_file --ignore-preflight-errors Swap
}

View File

@@ -9,12 +9,7 @@ ENABLE_LIVE_RESTORE=${ENABLE_LIVE_RESTORE:-false}
ENABLE_IPV6=${ENABLE_IPV6:-false}
K8S_NETWORK_ADDON=${K8S_NETWORK_ADDON:-flannel}
ENABLE_CONTAINERD_CRI=${ENABLE_CONTAINERD_CRI:-false}
CRIO_VERSION=${CRIO_VERSION:-"1.23:/1.23.0"}
CRIO_ALLOW_ICMP=${CRIO_ALLOW_ICMP:-true}
CNI_CONF_DIR=${CNI_CONF_DIR:-}
CNI_PLUGIN_DIR=${CNI_PLUGIN_DIR:-}
UBUNTU_DOCKER_VERSION=${UBUNTU_DOCKER_VERSION:-}
CRIO_VERSION=${CRIO_VERSION:-"1.18:/1.18.0"}
# Enable container services
enable_service container
@@ -28,8 +23,4 @@ if [[ ,${ENABLED_SERVICES} =~ ,"k8s-master" ]]; then
fi
# Customize kubeadm container images repository
KUBEADMIN_IMAGE_REPOSITORY=${KUBEADMIN_IMAGE_REPOSITORY:-"registry.k8s.io"}
# Configure crio pause image
CRIO_PAUSE_IMAGE=${CRIO_PAUSE_IMAGE:-"registry.k8s.io/pause:3.6"}
CRIO_PAUSE_COMMAND=${CRIO_PAUSE_COMMAND:-"/pause"}
KUBEADMIN_IMAGE_REPOSITORY=${KUBEADMIN_IMAGE_REPOSITORY:-"k8s.gcr.io"}

View File

@@ -1,5 +1,5 @@
[tox]
minversion = 3.18.0
minversion = 1.6
skipsdist = True
envlist = bashate
@@ -14,7 +14,7 @@ basepython = python3
# modified bashate tree
deps =
{env:BASHATE_INSTALL_PATH:bashate==0.5.1}
allowlist_externals = bash
whitelist_externals = bash
commands = bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
-not \( -type d -name doc -prune \) \