2 Commits

Author SHA1 Message Date
Hongbin Lu
e37ec8fa5e Tolerate non-existing of cni config file
Change-Id: I761bf9344651ec196471ca57bf0b29184a69e161
(cherry picked from commit 9620216b35)
2020-05-06 01:05:11 +00:00
c86dae7f57 Update .gitreview for stable/ussuri
Change-Id: I14f47a83653233325643649ef7789f15ab62b192
2020-04-28 17:09:29 +00:00
7 changed files with 45 additions and 221 deletions

View File

@@ -2,4 +2,4 @@
host=review.opendev.org
port=29418
project=openstack/devstack-plugin-container.git
defaultbranch=stable/2023.1
defaultbranch=stable/ussuri

View File

@@ -7,6 +7,7 @@
timeout: 4200
required-projects:
- openstack/devstack
- openstack/devstack-gate
- openstack/devstack-plugin-container
vars:
devstack_localrc:
@@ -17,13 +18,14 @@
- job:
name: devstack-plugin-container-k8s
parent: devstack-minimal
nodeset: openstack-two-node-focal
nodeset: openstack-two-node-bionic
pre-run: playbooks/devstack-plugin-container-k8s/pre.yaml
run: playbooks/devstack-plugin-container-k8s/run.yaml
post-run: playbooks/devstack-plugin-container-k8s/post.yaml
timeout: 7200
required-projects:
- openstack/devstack
- openstack/devstack-gate
- openstack/devstack-plugin-container
vars:
devstack_services:

View File

@@ -20,7 +20,6 @@ set +o xtrace
# --------
CRIO_ENGINE_SOCKET_FILE=${CRIO_ENGINE_SOCKET_FILE:-/var/run/crio/crio.sock}
CRIO_ALLOW_ICMP=$(trueorfalse True CRIO_ALLOW_ICMP)
# Functions
# ---------
@@ -41,22 +40,14 @@ function install_crio {
local lsb_dist=${os_VENDOR,,}
local dist_version=${os_CODENAME}
local kubic_obs_project_key="2472d6d0d2f66af87aba8da34d64390375060aa4"
local os="x${os_VENDOR}_${os_RELEASE}"
local arch
arch=$(dpkg --print-architecture)
if is_ubuntu; then
apt_get install apt-transport-https ca-certificates \
software-properties-common
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
--recv ${kubic_obs_project_key}
sudo apt-add-repository -y "deb https://download.opensuse.org/"`
`"repositories/devel:/kubic:/libcontainers:/stable/${os}/ /"
sudo apt-add-repository -y "deb http://download.opensuse.org/"`
`"repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/"`
`"${CRIO_VERSION}/${os}/ /"
apt_get install apt-transport-https ca-certificates software-properties-common
sudo add-apt-repository -y ppa:projectatomic/ppa
# Installing podman and containerd will get us compatible versions of
# cri-o and runc. And we need podman to manage container images anyway.
apt_get install podman buildah cri-o-runc cri-o
apt_get install podman buildah
elif is_fedora; then
if [[ "$lsb_dist" = "centos" ]]; then
sudo yum-config-manager \
@@ -74,85 +65,46 @@ function configure_crio {
# After an ./unstack it will be stopped. So it is ok if it returns exit-code == 1
sudo systemctl stop crio.service || true
export CRIO_CONF="/etc/crio/crio.conf"
local crio_conf
crio_conf=/etc/crio/crio.conf
# We're wrapping values in \"<val>\" because that's the format cri-o wants.
iniset -sudo ${CRIO_CONF} crio.api listen \"${CRIO_ENGINE_SOCKET_FILE}\"
iniset -sudo ${CRIO_CONF} crio.image pause_image \"${CRIO_PAUSE_IMAGE}\"
iniset -sudo ${CRIO_CONF} crio.image pause_command \"${CRIO_PAUSE_COMMAND}\"
iniset -sudo ${crio_conf} crio.api listen \"${CRIO_ENGINE_SOCKET_FILE}\"
if [[ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]]; then
# debug is way too verbose, info will be enough
iniset -sudo ${CRIO_CONF} crio.runtime log_level \"info\"
iniset -sudo ${crio_conf} crio.runtime log_level \"info\"
fi
if is_ubuntu; then
local crio_minor=${CRIO_VERSION#*.}
# In Ubuntu, a special vendored version of runc is installed with
# cri-o. This means that it'll not work with the system's version of
# runc. Moreover vendored runc is not placed into /usr/bin, where
# crio.conf states that it will be. We fix that by linking the vendored
# binary to /usr/bin.
if [[ ! -e /usr/bin/runc ]]; then
sudo ln -s /usr/lib/cri-o-runc/sbin/runc /usr/bin/runc
sudo chmod +x /usr/bin/runc
fi
# At least for 18.04 we need to set up /etc/containers/registries.conf
# with some initial content. That's another bug with that PPA.
local registries_conf
registries_conf="/etc/containers/registries.conf"
if [[ ! -f ${registries_conf} && $crio_minor -lt 24 ]]; then
if [[ ! -f ${registries_conf} ]]; then
sudo mkdir -p `dirname ${registries_conf}`
cat << EOF | sudo tee ${registries_conf}
[registries.search]
registries = ['docker.io']
EOF
else
# If there is a config file, that means, we are probably on the
# newer version of crio/container/podman, which basically means
# we cannot mix [registries.search] registries filled with
# something and unqualified-search-registries setting which appear
# on sysregistry v2 config syntax. And because it's a TOML now, we
# cannot rely on iniset, but directly change the file.
local rname='unqualified-search-registries'
local rval='["docker.io", "quay.io"]'
if [[ ! -f ${registries_conf} ]]; then
cat << EOF | sudo tee ${registries_conf}
unqualified-search-registries = ["docker.io", "quay.io"]
EOF
elif grep -wq "^${rname}" "${registries_conf}"; then
sudo sed -i -e \
"s/^${rname}.*$/${rname} = ${rval}/" "${registries_conf}"
else
sudo sed -i "1s/^/${rname} = ${rval}\n/" "${registries_conf}"
fi
fi
# CRI-O from kubic repo have placed runc in different place, not even
# in path, just to not conflict with runc package from official repo.
# We need to change it.
iniset -sudo ${CRIO_CONF} crio.runtime.runtimes.runc runtime_path \
\"/usr/lib/cri-o-runc/sbin/runc\"
if [ -n "${CNI_CONF_DIR}" ]; then
iniset -sudo ${CRIO_CONF} crio.network network_dir \
\"${CNI_CONF_DIR}\"
fi
if [ -n "${CNI_PLUGIN_DIR}" ]; then
iniset -sudo ${CRIO_CONF} crio.network plugin_dir \
\"${CNI_PLUGIN_DIR}\"
fi
# By default CRI-O doesn't allow ICMP between containers, although it
# is usually expected for testing purposes.
if [ "${CRIO_ALLOW_ICMP}" == "True" ]; then
if grep -wq '^default_sysctls' ${CRIO_CONF}; then
export CRIO_KEY="default_sysctls"
export CRIO_VAL='[ "net.ipv4.ping_group_range=0 2147483647", ]'
_update_config
else
iniset -sudo ${CRIO_CONF} crio.runtime default_sysctls \
'[ "net.ipv4.ping_group_range=0 2147483647", ]'
fi
fi
elif is_fedora; then
local lsb_dist=${os_VENDOR,,}
if [[ "$lsb_dist" = "centos" ]]; then
# CentOS packages are putting runc binary in different place...
iniset -sudo ${CRIO_CONF} crio.runtime runtime \"/usr/sbin/runc\"
iniset -sudo ${crio_conf} crio.runtime runtime \"/usr/sbin/runc\"
# CentOS version seems to only work with cgroupfs...
iniset -sudo ${CRIO_CONF} crio.runtime cgroup_manager \"cgroupfs\"
iniset -sudo ${crio_conf} crio.runtime cgroup_manager \"cgroupfs\"
fi
fi
@@ -163,46 +115,5 @@ function stop_crio {
sudo systemctl stop crio.service || true
}
function _update_config {
sudo -E python3 - <<EOF
"""
Update provided by CRIO_KEY key list in crio configuration in a form of:
some_key = [ some,
value
]
or just an empty list:
some_key = [
]
with the CRIO_VAL value.
Note, CRIO_VAL must include square brackets.
"""
import os
import re
crio_key = os.environ.get('CRIO_KEY')
crio_val = os.environ.get('CRIO_VAL')
crio_conf = os.environ.get('CRIO_CONF')
pat = re.compile(rf'{crio_key}\s*=\s*\[[^\]]*\]', flags=re.S | re.M)
with open(crio_conf) as fobj:
conf = fobj.read()
with open(crio_conf, 'w') as fobj:
search = pat.search(conf)
if search:
start, end = search.span()
conf = conf[:start] + f'{crio_key} = {crio_val}' + conf[end:]
fobj.write(conf)
EOF
}
# Restore xtrace
$_XTRACE_DOCKER

View File

@@ -24,8 +24,7 @@ set +o xtrace
DOCKER_ENGINE_SOCKET_FILE=${DOCKER_ENGINE_SOCKET_FILE:-/var/run/docker.sock}
DOCKER_ENGINE_PORT=${DOCKER_ENGINE_PORT:-2375}
DOCKER_CLUSTER_STORE=${DOCKER_CLUSTER_STORE:-}
STACK_GROUP="$( id --group --name "$STACK_USER" )"
DOCKER_GROUP=${DOCKER_GROUP:-$STACK_GROUP}
DOCKER_GROUP=${DOCKER_GROUP:-$STACK_USER}
DOCKER_CGROUP_DRIVER=${DOCKER_CGROUP_DRIVER:-}
# TODO(hongbin): deprecate and remove clear container
ENABLE_CLEAR_CONTAINER=$(trueorfalse False ENABLE_CLEAR_CONTAINER)
@@ -34,7 +33,6 @@ ENABLE_CONTAINERD_CRI=$(trueorfalse False ENABLE_CONTAINERD_CRI)
ENABLE_LIVE_RESTORE=$(trueorfalse False ENABLE_LIVE_RESTORE)
ENABLE_IPV6=$(trueorfalse False ENABLE_IPV6)
KATA_BRANCH=${KATA_BRANCH:-master}
KATA_RUNTIME=${KATA_RUNTIME:-kata-runtime}
CONTAINERD_CONF_DIR=/etc/containerd
CONTAINERD_CONF=$CONTAINERD_CONF_DIR/config.toml
@@ -57,12 +55,9 @@ function install_docker {
local lsb_dist=${os_VENDOR,,}
local dist_version=${os_CODENAME}
if [[ "$lsb_dist" != "centosstream" ]]; then
local arch
arch=$(dpkg --print-architecture)
fi
local arch
arch=$(dpkg --print-architecture)
if is_ubuntu; then
apt_get install apparmor
if [[ ${dist_version} == 'trusty' ]]; then
if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then
apt_get install linux-image-extra-$(uname -r) linux-image-extra-virtual
@@ -77,27 +72,12 @@ function install_docker {
${dist_version} \
stable"
REPOS_UPDATED=False apt_get_update
if [ -n "${UBUNTU_DOCKER_VERSION}" ]; then
apt_get install docker-ce=$UBUNTU_DOCKER_VERSION
else
apt_get install docker-ce
fi
apt_get install docker-ce
elif is_fedora; then
if [[ "$lsb_dist" = "centos" ]]; then
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
elif [[ "$lsb_dist" = "centosstream" ]]; then
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
sudo yum-config-manager \
--add-repo \
https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 #noqa
sudo yum-config-manager \
--enable \
packages.cloud.google.com_yum_repos_kubernetes-el7-x86_64
sudo dnf -y install kubeadm --nogpgcheck
elif [[ "$lsb_dist" = "fedora" ]]; then
sudo dnf config-manager \
--add-repo \
@@ -164,11 +144,11 @@ function configure_docker {
if [[ "$ENABLE_KATA_CONTAINERS" == "True" ]]; then
if sudo grep -E 'svm|vmx' /proc/cpuinfo &> /dev/null; then
runtime_opts+="\"runtimes\": {
\"$KATA_RUNTIME\": {
\"kata-runtime\": {
\"path\": \"/usr/bin/kata-runtime\"
}
},
\"default-runtime\": \"$KATA_RUNTIME\","
\"default-runtime\": \"kata-runtime\","
fi
# TODO(hongbin): deprecate and remove clear container
elif [[ "$ENABLE_CLEAR_CONTAINER" == "True" ]]; then
@@ -236,32 +216,17 @@ ExecStart=/usr/bin/dockerd --config-file=$docker_config_file
Environment="HTTP_PROXY=$http_proxy" "HTTPS_PROXY=$https_proxy" "NO_PROXY=$no_proxy"
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker.service
sudo systemctl --no-block restart docker.service
}
function configure_containerd {
sudo mkdir -p $CONTAINERD_CONF_DIR
sudo chown -R $STACK_USER $CONTAINERD_CONF_DIR
containerd config default > $CONTAINERD_CONF
stack_user_gid=$(getent group $STACK_USER | cut -d: -f3)
cat <<EOF | sudo tee $CONTAINERD_CONF >/dev/null
[grpc]
gid = $stack_user_gid
[debug]
level = "debug"
EOF
if [[ "$ENABLE_KATA_CONTAINERS" == "True" ]]; then
cat <<EOF | sudo tee -a $CONTAINERD_CONF >/dev/null
[plugins]
[plugins.cri]
[plugins.cri.containerd]
[plugins.cri.containerd.runtimes.${KATA_RUNTIME}]
runtime_type = "io.containerd.kata.v2"
EOF
fi
sed -i "s/gid = [0-9]*/gid = ${stack_user_gid}/" $CONTAINERD_CONF
sed -i "s/level = \"\"/level = \"debug\"/" $CONTAINERD_CONF
sudo systemctl --no-block restart containerd.service
}
@@ -271,7 +236,6 @@ function stop_docker {
function cleanup_docker {
uninstall_package docker-ce
rm -f $CONTAINERD_CONF
}
# TODO(hongbin): deprecate and remove clear container

View File

@@ -27,7 +27,7 @@ K8S_NODE_IP=${K8S_NODE_IP:-$HOST_IP}
K8S_API_SERVER_PORT=${K8S_API_SERVER_PORT:-6443}
K8S_POD_NETWORK_CIDR=${K8S_POD_NETWORK_CIDR:-10.244.0.0/16}
K8S_SERVICE_NETWORK_CIDR=${K8S_SERVICE_NETWORK_CIDR:-10.96.0.0/12}
K8S_VERSION=${K8S_VERSION:-1.23.16-00}
K8S_VERSION=${K8S_VERSION:-1.14.1-00}
K8S_NETWORK_ADDON=${K8S_NETWORK_ADDON:-flannel}
# Functions
@@ -60,20 +60,9 @@ function install_kubeadm {
function kubeadm_init {
local kubeadm_config_file
kubeadm_config_file=$(mktemp)
if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
CGROUP_DRIVER=$(iniget "/etc/crio/crio.conf" crio.runtime cgroup_manager)
CRI_SOCKET="unix:///var/run/crio/crio.sock"
else
# docker is used
CGROUP_DRIVER=$(docker info -f '{{.CgroupDriver}}')
CRI_SOCKET="/var/run/dockershim.sock"
fi
cat <<EOF | tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta2
cat <<EOF | sudo tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
imageRepository: "${KUBEADMIN_IMAGE_REPOSITORY}"
etcd:
external:
endpoints:
@@ -82,7 +71,7 @@ networking:
podSubnet: "${K8S_POD_NETWORK_CIDR}"
serviceSubnet: "${K8S_SERVICE_NETWORK_CIDR}"
---
apiVersion: kubeadm.k8s.io/v1beta2
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- token: "${K8S_TOKEN}"
ttl: 0s
@@ -90,21 +79,11 @@ kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: "${K8S_API_SERVER_IP}"
bindPort: ${K8S_API_SERVER_PORT}
nodeRegistration:
criSocket: "$CRI_SOCKET"
kubeletExtraArgs:
enable-server: "true"
taints:
[]
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
address: "0.0.0.0"
enableServer: true
cgroupDriver: $CGROUP_DRIVER
EOF
sudo kubeadm config images pull --image-repository=${KUBEADMIN_IMAGE_REPOSITORY}
sudo kubeadm init --config $kubeadm_config_file --ignore-preflight-errors Swap
local kube_config_file=$HOME/.kube/config
@@ -113,25 +92,15 @@ EOF
safe_chown $STACK_USER:$STACK_USER $kube_config_file
if [[ "$K8S_NETWORK_ADDON" == "flannel" ]]; then
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml
fi
}
function kubeadm_join {
local kubeadm_config_file
kubeadm_config_file=$(mktemp)
if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
CGROUP_DRIVER=$(iniget "/etc/crio/crio.conf" crio.runtime cgroup_manager)
CRI_SOCKET="unix:///var/run/crio/crio.sock"
else
# docker is used
CGROUP_DRIVER=$(docker info -f '{{.CgroupDriver}}')
CRI_SOCKET="/var/run/dockershim.sock"
fi
cat <<EOF | tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta2
cat <<EOF | sudo tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
bootstrapToken:
@@ -139,19 +108,10 @@ discovery:
token: "${K8S_TOKEN}"
unsafeSkipCAVerification: true
tlsBootstrapToken: "${K8S_TOKEN}"
nodeRegistration:
criSocket: "$CRI_SOCKET"
kubeletExtraArgs:
enable-server: "true"
taints:
[]
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
address: "0.0.0.0"
enableServer: true
cgroupDriver: $CGROUP_DRIVER
EOF
sudo kubeadm join --config $kubeadm_config_file --ignore-preflight-errors Swap
}

View File

@@ -9,12 +9,6 @@ ENABLE_LIVE_RESTORE=${ENABLE_LIVE_RESTORE:-false}
ENABLE_IPV6=${ENABLE_IPV6:-false}
K8S_NETWORK_ADDON=${K8S_NETWORK_ADDON:-flannel}
ENABLE_CONTAINERD_CRI=${ENABLE_CONTAINERD_CRI:-false}
CRIO_VERSION=${CRIO_VERSION:-"1.23:/1.23.0"}
CRIO_ALLOW_ICMP=${CRIO_ALLOW_ICMP:-true}
CNI_CONF_DIR=${CNI_CONF_DIR:-}
CNI_PLUGIN_DIR=${CNI_PLUGIN_DIR:-}
UBUNTU_DOCKER_VERSION=${UBUNTU_DOCKER_VERSION:-}
# Enable container services
enable_service container
@@ -26,10 +20,3 @@ if [[ ,${ENABLED_SERVICES} =~ ,"k8s-master" ]]; then
enable_service kube-scheduler
enable_service kube-proxy
fi
# Customize kubeadm container images repository
KUBEADMIN_IMAGE_REPOSITORY=${KUBEADMIN_IMAGE_REPOSITORY:-"registry.k8s.io"}
# Configure crio pause image
CRIO_PAUSE_IMAGE=${CRIO_PAUSE_IMAGE:-"registry.k8s.io/pause:3.6"}
CRIO_PAUSE_COMMAND=${CRIO_PAUSE_COMMAND:-"/pause"}

View File

@@ -1,5 +1,5 @@
[tox]
minversion = 3.18.0
minversion = 1.6
skipsdist = True
envlist = bashate
@@ -14,7 +14,7 @@ basepython = python3
# modified bashate tree
deps =
{env:BASHATE_INSTALL_PATH:bashate==0.5.1}
allowlist_externals = bash
whitelist_externals = bash
commands = bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
-not \( -type d -name doc -prune \) \