openstack-helm

2023-11-02

Installing openstack-helm

Helm comes in two major versions, Helm 2 and Helm 3. At the time of writing, openstack-helm only supports Helm 2, so download helm-v2.17.0:

wget https://get.helm.sh/helm-v2.17.0-linux-amd64.tar.gz
tar -xvf helm-v2.17.0-linux-amd64.tar.gz
cp linux-amd64/helm /usr/local/bin/

helm

Helm initialization

# Installing the Helm server-side component (Tiller) requires kubectl, so first make sure kubectl can reach the Kubernetes apiserver
# Initialize the Helm repository
helm init --upgrade -i registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.17.0 --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm serve &
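Before moving on, confirm that Tiller came up and that the client and server versions match (a quick sanity check, not part of the original steps):

kubectl get pods -n kube-system | grep tiller
helm version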

# Another thing to watch is RBAC: our Kubernetes cluster is 1.8.x, which enables RBAC by default, so Tiller needs a ServiceAccount with execute permissions; see Role-based Access Control in the Helm docs. Create a service-accounts.yaml file defining the Tiller account and its cluster role binding
vim service-accounts.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system

kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
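Note that the command above binds the namespace's default ServiceAccount. The manifest written earlier defines a dedicated tiller account instead; if you go that route, it presumably still needs to be applied and the Tiller deployment pointed at it, roughly:

kubectl apply -f service-accounts.yaml
# point the existing Tiller deployment at the new ServiceAccount
kubectl patch deploy tiller-deploy -n kube-system -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'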

Downloading openstack-helm

cd /opt
git clone https://github.com/openstack/openstack-helm.git
git clone https://github.com/openstack/openstack-helm-infra.git

Installing the OpenStack components

Install the dependency packages:
yum -y install make jq git curl

Change the OpenStack release

cd /opt/openstack-helm
git diff tools/deployment/common/setup-client.sh
-  -c${UPPER_CONSTRAINTS_FILE:=https://releases.openstack.org/constraints/upper/${OPENSTACK_RELEASE:-stein}} \
+  -c${UPPER_CONSTRAINTS_FILE:=https://releases.openstack.org/constraints/upper/${OPENSTACK_RELEASE:-ussuri}} \
 
 
git diff tools/deployment/common/get-values-overrides.sh
-: "${OPENSTACK_RELEASE:="train"}"
+: "${OPENSTACK_RELEASE:="ussuri"}"

Create the OpenStack clients and Kubernetes RBAC rules

./tools/deployment/developer/common/020-setup-client.sh

During deployment, Helm schedules pods based on node labels, so the nodes must be labeled first:

kubectl label nodes 10.2.11.176 nginx-ingress=enabled
kubectl label nodes 10.2.11.177 nginx-ingress=enabled
kubectl label nodes 10.2.11.176 openstack-control-plane=enabled
kubectl label nodes 10.2.11.177 openstack-control-plane=enabled
kubectl label nodes 10.2.11.178 openstack-control-plane=enabled
kubectl label nodes 10.2.11.176 openstack-compute-node=enabled
kubectl label nodes 10.2.11.177 openstack-compute-node=enabled
kubectl label nodes 10.2.11.178 openstack-compute-node=enabled
kubectl label nodes 10.2.11.176 openvswitch=enabled
kubectl label nodes 10.2.11.177 openvswitch=enabled
kubectl label nodes 10.2.11.178 openvswitch=enabled
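To confirm the labels took effect (a quick check, not in the original steps):

kubectl get nodes --show-labels | grep -E 'nginx-ingress|openstack|openvswitch'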

Installing the ingress controller

# Replace the ingress images: the originals live on k8s.gcr.io, which is blocked here, so they cannot be pulled
cd /opt/openstack-helm-infra/
git diff ingress/values.yaml
-    ingress: k8s.gcr.io/ingress-nginx/controller:v0.42.0
-    ingress_module_init: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
-    ingress_routed_vip: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
-    error_pages: k8s.gcr.io/defaultbackend:1.4
+    ingress: docker.io/willdockerhub/ingress-nginx-controller:v0.42.0
+    ingress_module_init: docker.io/openstackhelm/neutron:ussuri-ubuntu_bionic
+    ingress_routed_vip: docker.io/openstackhelm/neutron:ussuri-ubuntu_bionic
+    error_pages: docker.io/chenliujin/defaultbackend:1.4
 labels:
   server:
-    node_selector_key: openstack-control-plane
+    node_selector_key: nginx-ingress
     node_selector_value: enabled
   error_server:
-    node_selector_key: openstack-control-plane
+    node_selector_key: nginx-ingress
     node_selector_value: enabled
-    addr: 172.18.0.1/11
+    addr: 10.221.0.1/11

Run the deployment script

OSH_DEPLOY_MULTINODE=True ./tools/deployment/component/common/ingress.sh
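The repo ships a wait helper (the same one used by the Ceph script below); it can confirm the ingress rollout finished:

./tools/deployment/common/wait-for-pods.sh openstack
kubectl get pods -n openstack -o wide | grep ingress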

Installing Ceph

Modify the openstack-helm Ceph script

git diff tools/deployment/multinode/030-ceph.sh
diff --git a/tools/deployment/multinode/030-ceph.sh b/tools/deployment/multinode/030-ceph.sh
index b3fa8db2..f7cc9ed8 100755
--- a/tools/deployment/multinode/030-ceph.sh
+++ b/tools/deployment/multinode/030-ceph.sh
@@ -37,13 +37,13 @@ network:
   cluster: ${CEPH_CLUSTER_NETWORK}
 deployment:
   storage_secrets: true
-  ceph: true
-  rbd_provisioner: true
-  csi_rbd_provisioner: true
+  ceph: false
+  rbd_provisioner: false
+  csi_rbd_provisioner: false
   cephfs_provisioner: false
   client_secrets: false
 bootstrap:
-  enabled: true
+  enabled: false
 conf:
   ceph:
     global:
@@ -74,7 +74,7 @@ manifests:
 EOF
 
 : ${OSH_INFRA_PATH:="../openstack-helm-infra"}
-for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
+for CHART in ceph-mon ; do
   make -C ${OSH_INFRA_PATH} ${CHART}
   helm upgrade --install ${CHART} ${OSH_INFRA_PATH}/${CHART} \
     --namespace=ceph \
@@ -82,14 +82,4 @@ for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
     ${OSH_EXTRA_HELM_ARGS} \
     ${OSH_EXTRA_HELM_ARGS_CEPH_DEPLOY}
 
-  #NOTE: Wait for deploy
-  ./tools/deployment/common/wait-for-pods.sh ceph 1200
-
-  #NOTE: Validate deploy
-  MON_POD=$(kubectl get pods \
-    --namespace=ceph \
-    --selector="application=ceph" \
-    --selector="component=mon" \
-    --no-headers | awk '{ print $1; exit }')
-  kubectl exec -n ceph ${MON_POD} -- ceph -s
 done
 
git diff tools/deployment/multinode/kube-node-subnet.sh
diff --git a/tools/deployment/multinode/kube-node-subnet.sh b/tools/deployment/multinode/kube-node-subnet.sh
index 08f069a8..9ed56742 100755
--- a/tools/deployment/multinode/kube-node-subnet.sh
+++ b/tools/deployment/multinode/kube-node-subnet.sh
@@ -19,7 +19,6 @@ kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type=="
 function run_and_log_ipcalc {
   POD_NAME="tmp-$(cat /dev/urandom | env LC_CTYPE=C tr -dc a-z | head -c 5; echo)"
   kubectl run ${POD_NAME} \
-    --generator=run-pod/v1 \
     --wait \
     --image ${UTILS_IMAGE} \
     --restart=Never \

Update the Ceph image names in openstack-helm-infra

git diff ceph-client/values.yaml
diff --git a/ceph-client/values.yaml b/ceph-client/values.yaml
index 92c31611..a6920dcb 100644
--- a/ceph-client/values.yaml
+++ b/ceph-client/values.yaml
@@ -24,11 +24,11 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    ceph_mds: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    ceph_mds: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
 
git diff ceph-mon/values.yaml
diff --git a/ceph-mon/values.yaml b/ceph-mon/values.yaml
index f060c13a..7d284d67 100644
--- a/ceph-mon/values.yaml
+++ b/ceph-mon/values.yaml
@@ -23,10 +23,10 @@ deployment:
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    ceph_mon: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    ceph_mon: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
 
git diff ceph-osd/values.yaml
diff --git a/ceph-osd/values.yaml b/ceph-osd/values.yaml
index 7277a73c..906810e3 100644
--- a/ceph-osd/values.yaml
+++ b/ceph-osd/values.yaml
@@ -19,9 +19,9 @@
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_osd: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    ceph_osd: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
 
git diff ceph-provisioners/values.yaml
diff --git a/ceph-provisioners/values.yaml b/ceph-provisioners/values.yaml
index b4ab0a9d..a6f9c0b7 100644
--- a/ceph-provisioners/values.yaml
+++ b/ceph-provisioners/values.yaml
@@ -30,10 +30,10 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
     ceph_cephfs_provisioner: 'docker.io/openstackhelm/ceph-cephfs-provisioner:ubuntu_bionic-20200521'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:change_770201_ubuntu_bionic-20210113'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:latest-ubuntu_bionic'
     csi_provisioner: 'quay.io/k8scsi/csi-provisioner:v1.6.0'
     csi_snapshotter: 'quay.io/k8scsi/csi-snapshotter:v2.1.1'
     csi_attacher: 'quay.io/k8scsi/csi-attacher:v2.1.1'
 
git diff ceph-rgw/values.yaml
diff --git a/ceph-rgw/values.yaml b/ceph-rgw/values.yaml
index a5147856..21aedfec 100644
--- a/ceph-rgw/values.yaml
+++ b/ceph-rgw/values.yaml
@@ -24,13 +24,13 @@ release_group: null
 images:
   pull_policy: IfNotPresent
   tags:
-    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
-    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
+    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:latest-ubuntu_bionic'
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     image_repo_sync: 'docker.io/library/docker:17.07.0'
-    rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
-    rgw_placement_targets: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+    rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
+    rgw_placement_targets: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic'
     ks_endpoints: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
     ks_service: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
     ks_user: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'

Modify the ceph-mon template

git diff ceph-mon/templates/bin/mon/_start.sh.tpl
diff --git a/ceph-mon/templates/bin/mon/_start.sh.tpl b/ceph-mon/templates/bin/mon/_start.sh.tpl
index b045a39e..96745c2b 100644
--- a/ceph-mon/templates/bin/mon/_start.sh.tpl
+++ b/ceph-mon/templates/bin/mon/_start.sh.tpl
@@ -28,49 +28,6 @@ if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then
   exit 1
 fi
 
-if [[ -z "$MON_IP" ]]; then
-  echo "ERROR- MON_IP must be defined as the IP address of the monitor"
-  exit 1
-fi
-
-if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
-    MON_NAME=${POD_NAME}
-else
-    MON_NAME=${NODE_NAME}
-fi
-MON_DATA_DIR="/var/lib/ceph/mon/${CLUSTER}-${MON_NAME}"
-MONMAP="/etc/ceph/monmap-${CLUSTER}"
-
-# Make the monitor directory
-/bin/sh -c "mkdir -p \"${MON_DATA_DIR}\""
-
-function get_mon_config {
-  # Get fsid from ceph.conf
-  local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)
-
-  timeout=10
-  MONMAP_ADD=""
-
-  while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
-    # Get the ceph mon pods (name and IP) from the Kubernetes API. Formatted as a set of monmap params
-    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
-        MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--addv {{`{{.metadata.name}}`}} [v1:{{`{{.status.podIP}}`}}:${MON_PORT},v2:{{`{{.status.podIP}}`}}:${MON_PORT_V2}] {{`{{end}}`}} {{`{{end}}`}}")
-    else
-        MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--addv {{`{{.spec.nodeName}}`}} [v1:{{`{{.status.podIP}}`}}:${MON_PORT},v2:{{`{{.status.podIP}}`}}:${MON_PORT_V2}] {{`{{end}}`}} {{`{{end}}`}}")
-    fi
-    (( timeout-- ))
-    sleep 1
-  done
-
-  if [[ -z "${MONMAP_ADD// }" ]]; then
-      exit 1
-  fi
-
-  # Create a monmap with the Pod Names and IP
-  monmaptool --create ${MONMAP_ADD} --fsid ${fsid} ${MONMAP} --clobber
-}
-
-get_mon_config
 
 # If we don't have a monitor keyring, this is a new monitor
 if [ ! -e "${MON_DATA_DIR}/keyring" ]; then
@@ -81,33 +38,9 @@ if [ ! -e "${MON_DATA_DIR}/keyring" ]; then
     cp -vf ${MON_KEYRING}.seed ${MON_KEYRING}
   fi
 
-  if [ ! -e ${MONMAP} ]; then
-    echo "ERROR- ${MONMAP} must exist. You can extract it from your current monitor by running 'ceph mon getmap -o ${MONMAP}' or use a KV Store"
-    exit 1
-  fi
-
   # Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist
   for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}; do
     ceph-authtool ${MON_KEYRING} --import-keyring ${KEYRING}
   done
-
-  # Prepare the monitor daemon's directory with the map and keyring
-  ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" --mkfs -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
-else
-  echo "Trying to get the most recent monmap..."
-  # Ignore when we timeout, in most cases that means the cluster has no quorum or
-  # no mons are up and running yet
-  timeout 5 ceph --cluster "${CLUSTER}" mon getmap -o ${MONMAP} || true
-  ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
-  timeout 7 ceph --cluster "${CLUSTER}" mon add "${MON_NAME}" "${MON_IP}:${MON_PORT_V2}" || true
 fi
 
-# start MON
-exec /usr/bin/ceph-mon \
-  --cluster "${CLUSTER}" \
-  --setuser "ceph" \
-  --setgroup "ceph" \
-  -d \
-  -i ${MON_NAME} \
-  --mon-data "${MON_DATA_DIR}" \
-  --public-addr "${MON_IP}:${MON_PORT_V2}"

Modify the ceph-client-admin-keyring Secret
Base64-encode the external cluster's ceph.client.admin.keyring and update the Secret with it:

kubectl edit secret ceph-client-admin-keyring -n ceph

apiVersion: v1
data:
  keyring: Y2xpZW50LmFkbWluCiAgICAgICAga2V5OiBBUUFoZXR0ZEFBQUFBQkFBRUVSL3F1S0p5bGhFRDZ2UXg3YzhWdz09CiAgICAgICAgY2FwczogW21kc10gYWxsb3cgKgogICAgICAgIGNhcHM6IFttZ3JdIGFsbG93ICoKICAgICAgICBjYXBzOiBbbW9uXSBhbGxvdyAqCiAgICAgICAgY2FwczogW29zZF0gYWxsb3cgKg==
kind: Secret
metadata:
  name: ceph-client-admin-keyring
  namespace: ceph
tee /tmp/pvc-ceph-client-key.yaml <<EOF
apiVersion: v1
data:
  key: QVFBaGV0dGRBQUFBQUJBQUVFUi9xdUtKeWxoRUQ2dlF4N2M4Vnc9PQ==
kind: Secret
metadata:
  labels:
    application: ceph
    component: rbd
    release_group: ceph-openstack-config
  name: pvc-ceph-client-key
  namespace: openstack
type: kubernetes.io/rbd
EOF

kubectl apply -f /tmp/pvc-ceph-client-key.yaml
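For reference, the two base64 payloads above can be regenerated from the external Ceph cluster; a sketch assuming the default keyring path:

# full keyring file, base64-encoded on one line (for ceph-client-admin-keyring)
base64 -w0 /etc/ceph/ceph.client.admin.keyring
# bare key only (for pvc-ceph-client-key)
ceph auth print-key client.admin | base64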

Run the deployment script

# Prerequisites: 1. label the nodes; 2. update the image names in the ceph */values.yaml files; 3. create the ceph-mon-discovery Endpoints
./tools/deployment/multinode/030-ceph.sh
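Because the script's built-in wait and validation steps were removed in the diff above, the same checks can be run by hand (adapted from the deleted snippet):

./tools/deployment/common/wait-for-pods.sh ceph 1200
MON_POD=$(kubectl get pods --namespace=ceph --selector="application=ceph" --selector="component=mon" --no-headers | awk '{ print $1; exit }')
kubectl exec -n ceph ${MON_POD} -- ceph -s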

openstack-ceph

Create the ceph-mon-discovery Endpoints

tee /tmp/ceph-mon-discovery.yaml <<EOF
apiVersion: v1
kind: Endpoints
metadata:
  labels:
    app: ceph
    mon_cluster: ceph
    rook_cluster: ceph
  name: ceph-mon-discovery
  namespace: ceph
subsets:
- addresses:
  - ip: 10.2.11.176
    nodeName: k8s-1
    targetRef:
      kind: Pod
      namespace: ceph
  - ip: 10.2.11.177
    nodeName: k8s-2
    targetRef:
      kind: Pod
      namespace: ceph
  - ip: 10.2.11.178
    nodeName: k8s-3
    targetRef:
      kind: Pod
      namespace: ceph

  ports:
  - name: mon-msgr2
    port: 3300
    protocol: TCP
  - name: mon
    port: 6789
    protocol: TCP
EOF
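The manifest above is only written to /tmp; it presumably still needs to be applied before the activation script runs:

kubectl apply -f /tmp/ceph-mon-discovery.yaml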

# Configure the Ceph/OpenStack integration
./tools/deployment/multinode/040-ceph-ns-activate.sh

MariaDB

Modify the openstack-helm script so that MariaDB uses Kubernetes persistent volumes

git diff tools/deployment/multinode/050-mariadb.sh
diff --git a/tools/deployment/multinode/050-mariadb.sh b/tools/deployment/multinode/050-mariadb.sh
index 5ba6d44a..2d50dd14 100755
--- a/tools/deployment/multinode/050-mariadb.sh
+++ b/tools/deployment/multinode/050-mariadb.sh
@@ -11,8 +11,6 @@ make -C ${HELM_CHART_ROOT_PATH} mariadb
 : ${OSH_EXTRA_HELM_ARGS:=""}
 helm upgrade --install mariadb ${HELM_CHART_ROOT_PATH}/mariadb \
     --namespace=openstack \
-    --set volume.use_local_path_for_single_pod_cluster.enabled=true \
-    --set volume.enabled=false \
     --values=/tmp/mariadb.yaml \
     ${OSH_EXTRA_HELM_ARGS} \
     ${OSH_EXTRA_HELM_ARGS_MARIADB}

Update the MariaDB image names in openstack-helm-infra

git diff mariadb/values.yaml
diff --git a/mariadb/values.yaml b/mariadb/values.yaml
index dcc905dc..0885b266 100644
--- a/mariadb/values.yaml
+++ b/mariadb/values.yaml
@@ -21,8 +21,8 @@ release_group: null
 images:
   tags:
     mariadb: docker.io/openstackhelm/mariadb:latest-ubuntu_focal
-    ingress: k8s.gcr.io/ingress-nginx/controller:v0.42.0
-    error_pages: k8s.gcr.io/defaultbackend:1.4
+    ingress: docker.io/willdockerhub/ingress-nginx-controller:v0.42.0
+    error_pages: docker.io/chenliujin/defaultbackend:1.4
     prometheus_create_mysql_user: docker.io/library/mariadb:10.5.9-focal
     prometheus_mysql_exporter: docker.io/prom/mysqld-exporter:v0.12.1
     prometheus_mysql_exporter_helm_tests: docker.io/openstackhelm/heat:newton-ubuntu_xenial

Run the deployment script

./tools/deployment/multinode/050-mariadb.sh

Troubleshooting

# MariaDB is a stateful service and needs a volume, so create the Ceph RBD pool first; see the official docs: https://docs.ceph.com/en/latest/rbd/rbd-kubernetes/
# During the helm install the mariadb pod may never become Ready; to fix that:
kubectl get cm -n openstack
kubectl delete cm mariadb-mariadb-state -n openstack
helm delete mariadb --purge
# then redeploy
./tools/deployment/multinode/050-mariadb.sh
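To watch the pods come back up after the redeploy (the selector follows the application label convention these charts use elsewhere, so treat it as an assumption):

kubectl get pods -n openstack -l application=mariadb -w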

RabbitMQ

Run the installation script

./tools/deployment/multinode/060-rabbitmq.sh

Troubleshooting:

vim /var/lib/kubelet/config.yaml

Change clusterDomain: cluster.local. to remove the trailing dot, then restart the kubelet service.
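A minimal sketch of the same fix, assuming kubelet runs under systemd:

# drop the trailing dot from the cluster domain, then restart kubelet
sed -i 's/clusterDomain: cluster\.local\./clusterDomain: cluster.local/' /var/lib/kubelet/config.yaml
systemctl restart kubelet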

Memcached

Run the installation script

./tools/deployment/multinode/070-memcached.sh

Keystone

Run the installation script

./tools/deployment/multinode/080-keystone.sh
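A quick smoke test once Keystone is up, assuming the openstack_helm cloud entry written by the client setup step:

export OS_CLOUD=openstack_helm
openstack token issue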

Glance

Change the Glance storage backend to rbd

git diff tools/deployment/multinode/100-glance.sh
diff --git a/tools/deployment/multinode/100-glance.sh b/tools/deployment/multinode/100-glance.sh
index 20bacd23..beba0683 100755
--- a/tools/deployment/multinode/100-glance.sh
+++ b/tools/deployment/multinode/100-glance.sh
@@ -23,7 +23,7 @@ make glance
 
 #NOTE: Deploy command
 : ${OSH_EXTRA_HELM_ARGS:=""}
-: ${GLANCE_BACKEND:="pvc"}
+: ${GLANCE_BACKEND:="rbd"}

Run the installation script

./tools/deployment/multinode/100-glance.sh
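To verify Glance registered correctly (same OS_CLOUD assumption as above):

export OS_CLOUD=openstack_helm
openstack image list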

Cinder

Change the crush_rule

git diff tools/deployment/multinode/110-cinder.sh
diff --git a/tools/deployment/multinode/110-cinder.sh b/tools/deployment/multinode/110-cinder.sh
index 55f3af0a..a4136c6d 100755
--- a/tools/deployment/multinode/110-cinder.sh
+++ b/tools/deployment/multinode/110-cinder.sh
@@ -26,19 +26,19 @@ conf:
     pools:
       backup:
         replication: 1
-        crush_rule: same_host
+        crush_rule: replicated_rule
         chunk_size: 8
         app_name: cinder-backup
       # default pool used by rbd1 backend
       cinder.volumes:
         replication: 1
-        crush_rule: same_host
+        crush_rule: replicated_rule
         chunk_size: 8
         app_name: cinder-volume
       # secondary pool used by rbd2 backend
       cinder.volumes.gold:
         replication: 1
-        crush_rule: same_host
+        crush_rule: replicated_rule
         chunk_size: 8
         app_name: cinder-volume
   backends:

Run the installation script

./tools/deployment/multinode/110-cinder.sh
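The compute-kit script later checks for a volume service and an rbd volume type (see the CEPH_ENABLED test below), so it is worth confirming both now:

export OS_CLOUD=openstack_helm
openstack volume service list
openstack volume type list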

OpenvSwitch

Run the installation script

kubectl label nodes k8s-node-181 openvswitch=enabled
kubectl label nodes k8s-node-182 openvswitch=enabled
kubectl label nodes k8s-node-183 openvswitch=enabled

./tools/deployment/multinode/120-openvswitch.sh

Libvirt

Run the installation script

# the libvirt pod stays in Init state until the neutron-ovs-agent pod is running
./tools/deployment/multinode/130-libvirt.sh

After the libvirt pod is up, create the libvirt secret; this step could also be done by an init container.

# the stock container has no secret defined, so creating an instance fails; create it manually
virsh secret-list
 UUID                                  Usage
--------------------------------------------------------------------------------
# exec into the container
kubectl exec -it libvirt-libvirt-default-2jwf6  -n openstack bash
# generate the secret XML
cat << EOF > /etc/ceph.xml
<secret ephemeral="no" private="no">
<uuid>457eb676-33da-42ec-9a8c-9293d545c337</uuid>
<usage type="ceph">
<name>client.cinder secret</name>
</usage>
</secret>
EOF
virsh secret-define --file /etc/ceph.xml
# the secret UUID is fixed; the base64 value is the client.cinder key from Ceph
virsh secret-set-value --secret 457eb676-33da-42ec-9a8c-9293d545c337 --base64 AQAi3RRhsMJxGhAAWMZvqgo62ZZ4kUd30LvukA==
# restart the libvirt container
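The key itself comes from the external Ceph cluster, and virsh can confirm the secret stuck; a sketch:

# on a Ceph node: print the client.cinder key (already a base64 string)
ceph auth get-key client.cinder
# inside the libvirt container: verify the stored value
virsh secret-get-value 457eb676-33da-42ec-9a8c-9293d545c337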

Compute Kit (Nova and Neutron)

Modify the configuration

git diff tools/deployment/multinode/140-compute-kit.sh
diff --git a/tools/deployment/multinode/140-compute-kit.sh b/tools/deployment/multinode/140-compute-kit.sh
index 2fec7662..4d4367b7 100755
--- a/tools/deployment/multinode/140-compute-kit.sh
+++ b/tools/deployment/multinode/140-compute-kit.sh
@@ -16,7 +16,7 @@ set -xe
 : ${RUN_HELM_TESTS:="yes"}
 
 export OS_CLOUD=openstack_helm
-CEPH_ENABLED=false
+CEPH_ENABLED=true
 if openstack service list -f value -c Type | grep -q "^volume" && \
     openstack volume type list -f value -c Name | grep -q "rbd"; then
   CEPH_ENABLED=true
@@ -118,7 +118,7 @@ make neutron
 tee /tmp/neutron.yaml << EOF
 network:
   interface:
-    tunnel: docker0
+    tunnel: eth1

nova-bootstrap pulls an image from the Internet; stage it in advance to avoid pull failures:

wget http://10.2.11.2/dd_files/Kubernetes/hyperkube-amd64.tgz
docker load -i hyperkube-amd64.tgz

Run the installation script

./tools/deployment/multinode/140-compute-kit.sh

Neutron may fail during installation; if it does, delete the release and run it again:

helm list
neutron                 1               Thu Aug 12 16:51:23 2021        FAILED          neutron-0.2.8                   v1.0.0          openstack
# delete the failed release
helm delete neutron --purge

Rerun:

helm upgrade --install neutron ./neutron --namespace=openstack --values=/tmp/neutron.yaml --values=../openstack-helm/neutron/values_overrides/ussuri-ubuntu_bionic.yaml
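Once the rerun succeeds, confirm the agents and compute services registered:

export OS_CLOUD=openstack_helm
openstack network agent list
openstack compute service list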

Horizon

Run the installation script

./tools/deployment/multinode/085-horizon.sh

After the dashboard is installed, its resources may be unreachable; the workaround is as follows.
Install the traefik ingress:

wget http://10.2.11.2/dd_files/Kubernetes/traefik.tar.gz
tar -xvf traefik.tar.gz
cd traefik
# create the resources
kubectl apply -f .
ingress.networking.k8s.io/horizon created
Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress
ingress.extensions/horizon-a created
serviceaccount/traefik-ingress-controller created
deployment.apps/traefik-ingress-controller created
service/traefik-ingress-service created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
service/traefik-web-ui created
ingress.extensions/traefik-web-ui created
 
# check the pod
kubectl get pod -n kube-system
traefik-ingress-controller-6dc7585cdc-97vhj   1/1     Running   0          2s
 
# check the ingress
kubectl get ingress -n kube-system
NAME             CLASS    HOSTS                 ADDRESS   PORTS   AGE
traefik-web-ui   <none>   traefik-ui.minikube             80      2m7s

Verification

List the ingresses:

kubectl get ingress -n openstack
NAME                          CLASS    HOSTS                                                                                               ADDRESS                               PORTS   AGE
cinder                      <none>   cinder,cinder.openstack,cinder.openstack.svc.cluster.local                                        10.2.11.177,10.2.11.178               80      50m
glance                      <none>   glance,glance.openstack,glance.openstack.svc.cluster.local                                        10.2.11.177,10.2.11.178               80      57m
horizon                     <none>   horizon,horizon.openstack,horizon.openstack.svc.cluster.local                                     10.2.11.177,10.2.11.178               80      99s
keystone                    <none>   keystone,keystone.openstack,keystone.openstack.svc.cluster.local                                  10.2.11.177,10.2.11.178               80      72m
metadata                    <none>   metadata,metadata.openstack,metadata.openstack.svc.cluster.local                                  10.2.11.177,10.2.11.178               80      34m
neutron                     <none>   neutron,neutron.openstack,neutron.openstack.svc.cluster.local                                     10.2.11.177,10.2.11.178               80      9m38s
nova                        <none>   nova,nova.openstack,nova.openstack.svc.cluster.local                                              10.2.11.177,10.2.11.178               80      34m
novncproxy                  <none>   novncproxy,novncproxy.openstack,novncproxy.openstack.svc.cluster.local                            10.2.11.177,10.2.11.178               80      34m
openstack-ingress-openstack <none>   *.openstack.svc.cluster.local                                                                     10.2.11.176,10.2.11.177,10.2.11.178   80      133m
placement                   <none>   placement,placement.openstack,placement.openstack.svc.cluster.local                               10.2.11.177,10.2.11.178               80      34m
rabbitmq-mgr-7b1733         <none>   rabbitmq-mgr-7b1733,rabbitmq-mgr-7b1733.openstack,rabbitmq-mgr-7b1733.openstack.svc.cluster.local 10.2.11.177,10.2.11.178               80      86m

Configure /etc/hosts name resolution

10.2.11.176  k8s-1
10.2.11.177  k8s-2 cinder.openstack.svc.cluster.local glance.openstack.svc.cluster.local horizon.openstack.svc.cluster.local keystone.openstack.svc.cluster.local
10.2.11.178  k8s-3 neutron.openstack.svc.cluster.local nova.openstack.svc.cluster.local placement.openstack.svc.cluster.local

Configure the admin-rc environment variables

cat admin.rc
export OS_USERNAME=admin
export OS_PASSWORD=password
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_DOMAIN_NAME=default
export OS_AUTH_URL=http://keystone.openstack.svc.cluster.local/v3
export OS_IDENTITY_API_VERSION=3
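With the hosts entries and admin.rc in place, the CLI works from outside the cluster; for example:

source admin.rc
openstack endpoint list
openstack server list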

Configure resolution on Windows in C:\Windows\System32\drivers\etc\hosts:

10.2.11.178 horizon.openstack.svc.cluster.local
10.2.11.178 novncproxy.openstack.svc.cluster.local
10.2.11.178 traefik-ui.minikube

Modify the VNC ingress

kubectl edit ingress novncproxy -n openstack
Delete the data under metadata/annotations.
