openstack octavia部署 ussuri U版

2023-05-16

注:1.octavia源码下载地址:

    cd /home

    git clone https://github.com/openstack/octavia.git -b stable/ussuri    #证书

    git clone https://github.com/openstack/octavia-dashboard.git -b stable/ussuri    #在dashboard中添加loadbalancer

注:如各节点重启后需重新配置o-hm0网卡ip和mac

主节点(controller1)基础配置

一、创建数据库

# Log in to MariaDB as root (password 123456), create the octavia database
# and a dedicated DB account for the Octavia services (local + remote).
mysql -uroot -p123456
CREATE DATABASE octavia;
GRANT ALL PRIVILEGES ON octavia.* TO 'octavia'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON octavia.* TO 'octavia'@'%' IDENTIFIED BY '123456';
flush privileges;
exit;

二、安装软件包

# Install the Octavia services and the python3 client (Ussuri is python3-only).
# Note: each continuation needs a space BEFORE the backslash; the original
# mixed "install\" / "....noarch\" with no separating space.
yum -y install \
  openstack-octavia-api.noarch \
  openstack-octavia-common.noarch \
  openstack-octavia-health-manager.noarch \
  openstack-octavia-housekeeping.noarch \
  openstack-octavia-worker.noarch \
  python3-octaviaclient.noarch

三、创建keystone认证体系(用户、角色、endpoint)

# Create the octavia service user, grant it admin on the service project,
# then register the load-balancer service and its three endpoints.
openstack user create --domain default  --password 123456 octavia
openstack role add --project service --user octavia admin
openstack service create load-balancer --name octavia
openstack endpoint create octavia public http://controller1:9876 --region RegionOne 
openstack endpoint create octavia admin http://controller1:9876 --region RegionOne
openstack endpoint create octavia internal http://controller1:9876 --region RegionOne

四、导入Amphora镜像,按需求创建实例类型

# Download the Ussuri test amphora image (the original listed a bare URL,
# which is not an executable command).
wget https://tarballs.opendev.org/openstack/octavia/test-images/test-only-amphora-x64-haproxy-centos-8.qcow2

# Credentials of the "octavia" service user (service project); export them so
# the following openstack CLI calls authenticate as that user.
OS_USERNAME=octavia
OS_PASSWORD=123456
OS_PROJECT_NAME=service
OS_USER_DOMAIN_NAME=Default
OS_PROJECT_DOMAIN_NAME=Default
OS_AUTH_URL=http://controller1:5000/v3
OS_IDENTITY_API_VERSION=3
export OS_USERNAME OS_PASSWORD OS_PROJECT_NAME OS_USER_DOMAIN_NAME \
  OS_PROJECT_DOMAIN_NAME OS_AUTH_URL OS_IDENTITY_API_VERSION

# Upload the amphora image; the "amphora" tag is what controller_worker's
# amp_image_tag setting matches on.
openstack image create amphora-x64-haproxy --public --container-format=bare \
 --disk-format qcow2 --file test-only-amphora-x64-haproxy-centos-8.qcow2 --tag amphora 

# Create the flavor used to boot amphora instances
openstack flavor create --ram 4096 --disk 50 --vcpus 2 flavor

五、创建安全组

# Security group for the amphora management port (LB Network <-> amphora),
# created under the service project.

openstack security group create lb-mgmt-sec-grp --project service
openstack security group rule create --protocol udp --dst-port 5555 lb-mgmt-sec-grp
openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp
openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp
openstack security group rule create --protocol icmp lb-mgmt-sec-grp

# Security group for the Health Manager listen port (Health Manager <-> amphora)

openstack security group create lb-health-mgr-sec-grp --project service
openstack security group rule create --protocol udp --dst-port 5555 lb-health-mgr-sec-grp
openstack security group rule create --protocol tcp --dst-port 22 lb-health-mgr-sec-grp
openstack security group rule create --protocol tcp --dst-port 9443 lb-health-mgr-sec-grp

六、创建管理网络(注意要指定租户)

# Create the amphora management network in the service project.
openstack network create lb-mgmt-net --project service
# BUG FIX: the original ended the first line with "\ " (backslash + trailing
# space), which escapes the space instead of the newline and breaks the
# command; the continuation is rewritten cleanly here.
openstack subnet create --subnet-range 192.168.0.0/24 \
  --allocation-pool start=192.168.0.2,end=192.168.0.100 \
  --network lb-mgmt-net lb-mgmt-subnet

七、创建管理端口

# Create a Neutron port for the Health Manager listener on this node;
# replace <hostname> with this controller's hostname.
openstack port create octavia-health-manager-standalone-listen-port \
  --security-group lb-health-mgr-sec-grp \
  --device-owner Octavia:health-mgr \
  --host <hostname> --network lb-mgmt-net \
  --project service
# Plug an internal OVS port o-hm0 into br-int, bound to that Neutron port;
# fill in the MAC address and port ID reported by "openstack port show".
ovs-vsctl --may-exist add-port br-int o-hm0 \
  -- set Interface o-hm0 type=internal \
  -- set Interface o-hm0 external-ids:iface-status=active \
  -- set Interface o-hm0 external-ids:attached-mac=<Health Manager Listen Port MAC> \
  -- set Interface o-hm0 external-ids:iface-id=<Health Manager Listen Port ID>

八、为管理端口设置ip(Health Manager 监听端口设置 IP)

# Give o-hm0 the MAC and IP of the Neutron port created above.
ip link set dev o-hm0 address <Health Manager Listen Port MAC>
ip addr add <Health Manager Listen Port IP/24> dev o-hm0   # <> is the port IP plus subnet prefix
ip link set dev o-hm0 up 	# bring the interface up so the configuration takes effect

 

九、生成octavia controller与amphora通信的证书

U版自带的证书生成脚本不可用——需按以下步骤手动生成证书

# 注意,以下所有让输入pass密码的,直接输入123456回车
# 让输入其他参数的直接回车
cd /home
mkdir certs
chmod 700 certs
cd certs
cp /home/octavia/bin/openssl.cnf ./
mkdir client_ca
mkdir server_ca
cd server_ca
mkdir certs crl newcerts private
chmod 700 private
touch index.txt
echo 1000 > serial
openssl genrsa -aes256 -out private/ca.key.pem 4096
chmod 400 private/ca.key.pem
openssl req -config ../openssl.cnf -key private/ca.key.pem -new -x509 -days \
7300 -sha256 -extensions v3_ca -out certs/ca.cert.pem
cd ../client_ca
mkdir certs crl csr newcerts private
chmod 700 private
touch index.txt
echo 1000 > serial
openssl genrsa -aes256 -out private/ca.key.pem 4096
chmod 400 private/ca.key.pem
openssl req -config ../openssl.cnf -key private/ca.key.pem -new -x509 -days 7300 \
-sha256 -extensions v3_ca -out certs/ca.cert.pem
openssl genrsa -aes256 -out private/client.key.pem 2048
openssl req -config ../openssl.cnf -new -sha256 -key private/client.key.pem -out \
csr/client.csr.pem
openssl ca -config ../openssl.cnf -extensions usr_cert -days 7300 -notext \
-md sha256 -in csr/client.csr.pem -out certs/client.cert.pem
openssl rsa -in private/client.key.pem -out private/client.cert-and-key.pem
cat certs/client.cert.pem >> private/client.cert-and-key.pem
cd ..
cd ..
# Install the generated CA material where the Octavia services expect it.
# -p makes the mkdir idempotent so this section can be re-run safely.
mkdir -p /etc/octavia/certs
chmod 700 /etc/octavia/certs
cp server_ca/private/ca.key.pem /etc/octavia/certs/server_ca.key.pem
chmod 700 /etc/octavia/certs/server_ca.key.pem
cp server_ca/certs/ca.cert.pem /etc/octavia/certs/server_ca.cert.pem
cp client_ca/certs/ca.cert.pem /etc/octavia/certs/client_ca.cert.pem
cp client_ca/private/client.cert-and-key.pem /etc/octavia/certs/client.cert-and-key.pem
chmod 700 /etc/octavia/certs/client.cert-and-key.pem
# use the portable user:group form (the "user.group" dot syntax is deprecated;
# this also matches the chown used for the other nodes below)
chown -R octavia:octavia /etc/octavia/certs

十、创建密钥对

# Generate a passphrase-less SSH keypair for amphora access and register the
# public key in Nova under the octavia user id.
mkdir -p /etc/octavia/.ssh 
ssh-keygen -b 2048 -t rsa -N "" -f /etc/octavia/.ssh/octavia_ssh_key
# NOTE(review): the standalone nova CLI is deprecated; "openstack keypair
# create --public-key ..." is the modern equivalent — confirm before switching.
nova keypair-add --pub-key=/etc/octavia/.ssh/octavia_ssh_key.pub octavia_ssh_key --user {octavia_user_id}

修改配置文件

十一、修改配置文件

#/etc/octavia/octavia.conf

[DEFAULT]
# NOTE: oslo.config does not support backslash line continuation, so the
# transport_url value must be written on a single line (no space after commas).
transport_url = rabbit://openstack:openstack@controller1:5672,openstack:openstack@controller2:5672,openstack:openstack@controller3:5672
[api_settings]
# <host IP> of this controller node — inline "<...>" annotations are not
# valid INI and would corrupt the parsed value, so they live in comments.
bind_host = 172.27.125.201
bind_port = 9876
api_handler = queue_producer
auth_strategy = keystone
[database]
connection = mysql+pymysql://octavia:123456@controller1:3306/octavia
[health_manager]
# <Health Manager Listen Port IP> — the o-hm0 address configured in steps 7/8
bind_ip = 192.168.0.12
bind_port = 5555
# HA: list every controller node's o-hm0 ip:port; replace the 192.168.0.*
# placeholder with the other nodes' real addresses
controller_ip_port_list = 192.168.0.12:5555,192.168.0.*:5555
heartbeat_key = insecure
[keystone_authtoken]
auth_uri = http://172.27.125.106:5000
auth_url = http://172.27.125.106:5000
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = octavia
password = 123456
[certificates]
cert_generator = local_cert_generator
ca_private_key_passphrase = 123456
ca_private_key = /etc/octavia/certs/server_ca.key.pem
ca_certificate = /etc/octavia/certs/server_ca.cert.pem
[haproxy_amphora]
client_cert = /etc/octavia/certs/client.cert-and-key.pem
server_ca = /etc/octavia/certs/server_ca.cert.pem
key_path = /etc/octavia/.ssh/octavia_ssh_key
base_path = /var/lib/octavia
base_cert_dir = /var/lib/octavia/certs
connection_max_retries = 5500
connection_retry_interval = 5
rest_request_conn_timeout = 10
rest_request_read_timeout = 120
[controller_worker]
client_ca = /etc/octavia/certs/client_ca.cert.pem
# tag given to the image in step 4 (openstack image list)
amp_image_tag = amphora
# owner id of the amphora image (openstack image show — note: the OWNER's id)
amp_image_owner_id = 22d71ab1b5b548f7b076b61e7c3ed7dc
# flavor id created in step 4 (openstack flavor list)
amp_flavor_id = c6cc5162-26cb-4e98-aa49-efb3eb369eb2
# lb-mgmt-sec-grp id created in step 5 (openstack security group list)
amp_secgroup_list = 0428056a-f1fb-457e-bd33-c2d23eb6d2cd
# lb-mgmt-net id created in step 6 (openstack network list)
amp_boot_network_list = 450227cc-11e8-4422-8bf5-540ef5cb2dfe
amp_ssh_key_name = octavia_ssh_key
network_driver = allowed_address_pairs_driver
compute_driver = compute_nova_driver
amphora_driver = amphora_haproxy_rest_driver
# the three tuning values below are optional on physical hardware and may be
# lowered when deploying inside virtual machines
workers = 2
amp_active_retries = 100
amp_active_wait_sec = 2
loadbalancer_topology = ACTIVE_STANDBY
[oslo_messaging]
topic = octavia_prov
rpc_thread_pool_size = 2
[house_keeping]
load_balancer_expiry_age = 3600
amphora_expiry_age = 3600
[service_auth]
memcached_servers = controller1:11211,controller2:11211,controller3:11211
project_domain_name = default
project_name = service
user_domain_name = default
password = 123456
username = octavia
auth_type = password
auth_url = http://172.27.125.106:5000
auth_uri = http://172.27.125.106:5000

十二、初始化octavia数据库

# Run the Alembic migrations to create/upgrade the octavia DB schema
octavia-db-manage   upgrade head

十三、启动服务

#重启octavia各组件

(如果使用httpd纳管octavia-api,就需要stop并disable octavia-api防止octavia发生端口冲突)

# (Re)start all Octavia components
systemctl restart octavia-api.service
systemctl restart octavia-worker.service
systemctl restart octavia-health-manager.service
systemctl restart octavia-housekeeping.service

# Enable them at boot
systemctl enable octavia-api.service
systemctl enable octavia-worker.service
systemctl enable octavia-health-manager.service
systemctl enable octavia-housekeeping.service

十四、添加 Load Balancers 页面

# Install octavia-dashboard from the source cloned earlier and enable the
# Load Balancers panel in Horizon.
cd octavia_file/octavia-dashboard
# NOTE(review): "python setup.py install" is deprecated; "pip install ." is
# the modern equivalent — confirm before switching.
python setup.py install
cd octavia_dashboard/enabled/
cp _1482_project_load_balancer_panel.py /usr/share/openstack-dashboard/openstack_dashboard/enabled/
cd /usr/share/openstack-dashboard
# regenerate Horizon's static assets, then restart the web server
./manage.py collectstatic
./manage.py compress
systemctl restart httpd   

其余节点(controller2/3)基础配置

一、安装软件包

# Install the Octavia services and client on the remaining controllers.
# FIX: the original installed python2-octaviaclient here, but Ussuri dropped
# python2 and controller1 installs python3-octaviaclient — use python3 on
# every node; continuation backslashes are also spaced consistently.
yum -y install \
  openstack-octavia-api.noarch \
  openstack-octavia-common.noarch \
  openstack-octavia-health-manager.noarch \
  openstack-octavia-housekeeping.noarch \
  openstack-octavia-worker.noarch \
  python3-octaviaclient.noarch

 

二、创建管理端口

# Create a Neutron port for this node's Health Manager listener; replace
# <hostname> with this node's hostname.
openstack port create octavia-health-manager-standalone-listen-port \
  --security-group lb-health-mgr-sec-grp \
  --device-owner Octavia:health-mgr \
  --host <hostname> --network lb-mgmt-net \
  --project service
# Plug an internal OVS port o-hm0 into br-int, bound to that Neutron port;
# fill in the MAC address and port ID reported by "openstack port show".
ovs-vsctl --may-exist add-port br-int o-hm0 \
  -- set Interface o-hm0 type=internal \
  -- set Interface o-hm0 external-ids:iface-status=active \
  -- set Interface o-hm0 external-ids:attached-mac=<Health Manager Listen Port MAC> \
  -- set Interface o-hm0 external-ids:iface-id=<Health Manager Listen Port ID>

三、为管理端口设置IP

# Give o-hm0 this node's Health Manager port MAC and IP, then bring it up.
ip link set dev o-hm0 address <Health Manager Listen Port MAC>
ip addr add <ip-addr/24> dev o-hm0 
ip link set dev o-hm0 up

四、复制主节点controller1上已经生成的证书和秘钥

mkdir -p /etc/octavia/.ssh/
mkdir -p /etc/octavia/certs/
进入controller1,把目录/etc/octavia/certs和/etc/octavia/.ssh下的文件打包传输到其余各节点中的对应文件夹下
chown octavia:octavia /etc/octavia/certs -R

五、修改配置文件

#/etc/octavia/octavia.conf

[DEFAULT]
# NOTE: oslo.config does not support backslash line continuation, so the
# transport_url value must be written on a single line (no space after commas).
transport_url = rabbit://openstack:openstack@controller1:5672,openstack:openstack@controller2:5672,openstack:openstack@controller3:5672
[api_settings]
# <host IP> of this controller node — inline "<...>" annotations are not
# valid INI and would corrupt the parsed value, so they live in comments.
bind_host = 172.27.125.202
bind_port = 9876
api_handler = queue_producer
auth_strategy = keystone
[database]
connection = mysql+pymysql://octavia:123456@controller2:3306/octavia
[health_manager]
# <Health Manager Listen Port IP> — replace the placeholder below with this
# node's actual o-hm0 address
bind_ip = 192.168.0.*
bind_port = 5555
# HA: list every controller node's o-hm0 ip:port; replace the 192.168.0.*
# placeholder with the other nodes' real addresses
controller_ip_port_list = 192.168.0.11:5555,192.168.0.*:5555
heartbeat_key = insecure
[keystone_authtoken]
auth_uri = http://172.27.125.106:5000
auth_url = http://172.27.125.106:5000
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = octavia
password = 123456
[certificates]
cert_generator = local_cert_generator
ca_private_key_passphrase = 123456
ca_private_key = /etc/octavia/certs/server_ca.key.pem
ca_certificate = /etc/octavia/certs/server_ca.cert.pem
[haproxy_amphora]
client_cert = /etc/octavia/certs/client.cert-and-key.pem
server_ca = /etc/octavia/certs/server_ca.cert.pem
key_path = /etc/octavia/.ssh/octavia_ssh_key
base_path = /var/lib/octavia
base_cert_dir = /var/lib/octavia/certs
connection_max_retries = 5500
connection_retry_interval = 5
rest_request_conn_timeout = 10
rest_request_read_timeout = 120
[controller_worker]
client_ca = /etc/octavia/certs/client_ca.cert.pem
# tag given to the image in step 4 (openstack image list)
amp_image_tag = amphora
# owner id of the amphora image (openstack image show — note: the OWNER's id)
amp_image_owner_id = 22d71ab1b5b548f7b076b61e7c3ed7dc
# flavor id created in step 4 (openstack flavor list)
amp_flavor_id = c6cc5162-26cb-4e98-aa49-efb3eb369eb2
# lb-mgmt-sec-grp id created in step 5 (openstack security group list)
amp_secgroup_list = 0428056a-f1fb-457e-bd33-c2d23eb6d2cd
# lb-mgmt-net id created in step 6 (openstack network list)
amp_boot_network_list = 450227cc-11e8-4422-8bf5-540ef5cb2dfe
amp_ssh_key_name = octavia_ssh_key
network_driver = allowed_address_pairs_driver
compute_driver = compute_nova_driver
amphora_driver = amphora_haproxy_rest_driver
# the three tuning values below are optional on physical hardware and may be
# lowered when deploying inside virtual machines
workers = 2
amp_active_retries = 100
amp_active_wait_sec = 2
loadbalancer_topology = ACTIVE_STANDBY
[oslo_messaging]
topic = octavia_prov
rpc_thread_pool_size = 2
[house_keeping]
load_balancer_expiry_age = 3600
amphora_expiry_age = 3600
[service_auth]
memcached_servers = controller1:11211,controller2:11211,controller3:11211
project_domain_name = default
project_name = service
user_domain_name = default
password = 123456
username = octavia
auth_type = password
auth_url = http://172.27.125.106:5000
auth_uri = http://172.27.125.106:5000

六、初始化octavia数据库

 

# Run the Alembic migrations to create/upgrade the octavia DB schema
octavia-db-manage   upgrade head

七、启动服务

(如果使用httpd纳管octavia-api,就需要stop并disable octavia-api防止octavia发生端口冲突)

# (Re)start all Octavia components

systemctl restart octavia-api.service
systemctl restart octavia-worker.service
systemctl restart octavia-health-manager.service
systemctl restart octavia-housekeeping.service

# Enable them at boot

systemctl enable octavia-api.service
systemctl enable octavia-worker.service
systemctl enable octavia-health-manager.service
systemctl enable octavia-housekeeping.service

 

 

本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)

openstack octavia部署 ussuri U版 的相关文章

  • 每天进步一点点——五分钟理解一致性哈希算法(consistent hashing)

    转载请说明出处 http blog csdn net cywosp article details 23397179 一致性哈希算法在1997年由麻省理工学院提出的一种分布式哈希 DHT 实现算法 设计目标是为了解决因特网中的热点 Hot
  • OpenStack中制作和上传Docker镜像

    一 在docker节点新建dockerfile文件 vim dockerfie 二 文件里面填 无需任何修改 FROM ubuntu 14 04 MAINTAINER ZhangKe lt 294667739 qq com gt RUN a
  • Ceph性能优化总结(v0.94)

    如需转载请标明作者 原文地址 http xiaoquqi github io blog 2015 06 28 ceph performance optimization summary 最近一直在忙着搞Ceph存储的优化和测试 看了各种资料
  • openstack装配置 Neutron( 控制节点

    在控制节点 controller 安装 Neutron 服务 同时配置支持 vxlan 1 登录数据库创建 neutron 数据库 MariaDB none gt CREATE DATABASE neutron default charac
  • Pecemaker+Corosync+Haproxy高可用Openstack集群实战

    一 DRBD简介 DRBD的全称为 Distributed Replicated Block Device DRBD 分布式块设备复制 DRBD是由内核模块和相关脚本而构成 用以构建高可用性的集群 其实现方式是通过网络来镜像整个设备 它允许
  • OpenStack H版发布

    根据官方发布的消息 OpenStack 2013 2 代号Havana 最终版已经发布 除了增加OpenStack Metering Ceilometer 和OpenStack Orchestration Heat 两个新组件外 还完成了4
  • kolla-ansible部署openstack+opendaylight

    1 环境准备 准备三台机器 分别作为OpenStack的controller和compute节点 OpenDayLight节点 另外把controller作为本次的部署节点 其中controller和compute节点各两张网卡 odl节点
  • openvswitch--OpenFlow 流表设置

    流规则组成 每条流规则由一系列字段组成 分为基本字段 条件字段和动作字段三部分 基本字段包括生效时间duration sec 所属表项table id 优先级priority 处理的数据包数n packets 空闲超时时间idle time
  • No package openstack-keystone available.Nothing to do

    root controller yum repos d yum y install openstack keystone httpd mod wsgi python openstackclient memcached python memc
  • openstack 安装并验证 Nova( 计算节点 + 控制节点)

    安装数据库 登录数据库创建 nova nova api nova cell0 数据库 root controller etcd mysql uroot pmaster Welcome to the MariaDB monitor Comma
  • Keystone 高可靠性部署与性能测试

    Goal Keystone Region 为跨地域的 Openstack 集群提供了统一的认证和用户租户管理 目前公司在国内外部署了数十套 Openstack 集群 其中既有集群在内网 又有集群在公网 既有 Havana 集群 也有 Ice
  • openstack热迁移机制分析(libvirt热迁移模型、nova热迁移控制逻辑、调试方法)

    前段时间在解决大内存热迁移失败的时候 查阅了下openstack热迁移相关知识 有了一些记录跟大家分享下 以基于L版openstack qemu kvm 跟大家分享下API库libvirt的热迁移机制和nova控制逻辑的一些记录 注 这篇博
  • CentOS7安装OpenStack(Liberty)

    1 安装yum源 yum install https buildlogs centos org centos 7 cloud x86 64 openstack liberty centos release openstack liberty
  • cehp 维护(二)常见告警维护

    零 修订记录 序号 修订时间 修订内容 1 2021 11 18 新增 一 摘要 笨猪主要介绍ceph常见告警及维护方法或过程中遇到的问题 二 环境信息 2 1 ceph版本 cephadmin proceph01 ceph v ceph
  • OpenStack--创建虚拟机

    启动虚拟机之前需要先做一些前期准备 比如网络和 IP 地址分配 虚拟机 类型创建等等 具体如下 1 网络规划及 IP 划分 官网安装文档 https docs openstack org ocata zh CN install guide
  • openstack-nova-compute.service起不来

    1 启动服务 2 查看compute nova日志tail var log nova nova compute log 发现身份验证机制AMQPLAIN拒绝登录 3 关闭防火墙 root controller systemctl stop
  • 如何设置docker容器的ip?

    有人可以告诉我如何将容器的 ip 设置为特定的 ip 吗 我不想将网桥设置为 IP 集 我在网络中得到的所有结果都是将 ips 范围设置为 docker 桥 您可以使用主机的现有 IP 如下所示 docker run p 127 0 0 1
  • Openstack.Net SDK无法访问带区域的服务

    使用我们自己的硬件 我们安装了带有所有组件的普通 openstack 但是由于区域问题 我在访问除身份之外的服务时遇到问题 使用的代码如下 使用我们创建的管理员帐户和管理员租户进行调用 public static void TestAcce
  • 创建新的 docker-machine 实例始终无法使用 openstack 驱动程序验证证书

    每次我尝试通过创建一个新实例docker machine在开放堆栈上 我总是在验证证书时收到此错误 我必须在创建实例后立即重新生成证书才能使用这些实例 docker machine create driver openstack opens
  • 云平台- sudo:无法解析主机[关闭]

    Closed 这个问题不符合堆栈溢出指南 help closed questions 目前不接受答案 我在 Amazon EC2 和 openstack 上使用 Linux 作为基于云的服务器 当尝试运行时 sudo chhown ubun

随机推荐