nova-console-container spice not installing correctly
Affects | Status | Importance | Assigned to | Milestone
---|---|---|---|---
OpenStack-Ansible | Expired | Undecided | Unassigned |
Bug Description
The container is not installed correctly and spice is not running
root@cloudsrv00
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 194/sshd
tcp6 0 0 :::22 :::* LISTEN 194/sshd
unix 2 [ ACC ] STREAM LISTENING 3258652 1/init /run/systemd/
unix 2 [ ACC ] STREAM LISTENING 3258657 1/init /run/systemd/
root@cloudsrv00
# Ansible managed
[DEFAULT]
# Disable stderr logging
use_stderr = False
# Logs / State
debug = False
fatal_deprecations = False
log_dir = /var/log/nova
state_path = /var/lib/nova
rootwrap_config = /etc/nova/
service_down_time = 120
default_
# Scheduler
cpu_allocation_
disk_allocation
ram_allocation_
reserved_
reserved_
# Compute
compute_driver = libvirt.
instance_
instances_path = /var/lib/
allow_resize_
image_cache_
resume_
# Api's
enabled_apis = osapi_compute,
osapi_compute_
# Rpc all
transport_url = rabbit:
executor_
rpc_response_
# Metadata
metadata_host = 192.168.0.250
metadata_port = 8775
metadata_workers = 12
# Network
dhcp_domain = openstacklocal
force_dhcp_release = True
dhcpbridge_flagfile = /etc/nova/nova.conf
firewall_driver = nova.virt.
my_ip = 192.168.6.193
default_
use_neutron = True
## Vif
linuxnet_
libvirt_vif_type = ethernet
vif_plugging_
vif_plugging_
# Hypervisor
default_
# Configdrive
force_config_drive = False
# Policy
max_age = 0
# Ceilometer notification configurations
instance_
instance_
notify_
# Notifications
[oslo_messaging
notification_topics = notifications
driver = messagingv2
transport_url = rabbit:
# Cache
[cache]
enabled = true
backend = oslo_cache.
memcache_servers = 192.168.
# Cinder
[cinder]
catalog_info = volumev2:
cross_az_attach = True
os_region_name = RegionOne
[spice]
agent_enabled = True
enabled = True
# Console Url and binds
html5proxy_base_url = https:/
server_listen = 192.168.6.193
server_
[vnc]
enabled = False
# Glance
[glance]
api_servers = http://
# Neutron
[neutron]
url = http://
region_name = RegionOne
auth_type = password
# Keystone client plugin password option
password = xxx
# Keystone client plugin username option
username = neutron
project_name = service
user_domain_name = Default
project_domain_name = Default
# Keystone client plugin authentication URL option
auth_url = http://
insecure = False
metadata_
service_
# Placement
[placement]
os_region_name = RegionOne
os_interface = internal
auth_type = "password"
password = xxx
username = placement
project_name = service
user_domain_name = Default
project_domain_name = Default
auth_url = http://
insecure = False
[conductor]
workers = 12
[keystone_
insecure = False
auth_type = password
auth_url = http://
auth_uri = http://
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = 53613c30be6dfca
region_name = RegionOne
memcached_servers = 192.168.
token_cache_time = 300
# if your memcached server is shared, use these settings to avoid cache poisoning
memcache_
memcache_secret_key = xxxx
[database]
connection = mysql+pymysql:
max_overflow = 10
max_pool_size = 120
pool_timeout = 30
[api_database]
connection = mysql+pymysql:
max_overflow = 10
max_pool_size = 120
pool_timeout = 30
[placement_
connection = mysql+pymysql:
max_overflow = 10
max_pool_size = 120
pool_timeout = 30
[oslo_concurrency]
lock_path = /var/lock/nova
[oslo_messaging
rabbit_use_ssl = True
rpc_conn_pool_size = 30
[libvirt]
inject_partition = -2
inject_password = False
inject_key = False
use_virtio_
cpu_mode = none
virt_type = qemu
remove_
live_migration_uri = "qemu+ssh:
live_migration_
hw_disk_discard = ignore
disk_cachemodes =
[wsgi]
api_paste_config = /etc/nova/
secure_
[api]
auth_strategy = keystone
enable_
use_forwarded_for = False
[scheduler]
max_attempts = 5
scheduler_driver = filter_scheduler
periodic_
host_manager = host_manager
discover_
[filter_scheduler]
max_instances_
max_io_ops_per_host = 10
ram_weight_
available_filters = nova.scheduler.
enabled_filters = RetryFilter,
host_subset_size = 10
weight_classes = nova.scheduler.
use_baremetal_
tracks_
[quota]
cores = 20
fixed_ips = -1
floating_ips = 10
injected_
injected_
injected_files = 5
instances = 10
key_pairs = 100
metadata_items = 128
ram = 51200
security_
security_groups = 10
server_
server_groups = 10
[upgrade_levels]
compute=auto
root@CloudSRV001:~# cat /etc/openstack_deploy/openstack_user_config.yml
cidr_networks:
container: 192.168.0.0/20
tunnel: 192.168.32.0/20
storage: 192.168.16.0/20
snet: 10.207.134.0/24
used_ips:
- "192.168.
- "192.168.0.254"
- "192.168.
- "192.168.
- "192.168.
- "10.207.
global_overrides:
#
# The below domain name must resolve to an IP address
# in the CIDR specified in haproxy_
# If using different protocols (https/http) for the public/internal
# endpoints the two addresses must be different.
#
external_
internal_
tunnel_bridge: "br-vxlan"
management_
provider_
- network:
ip_from_q: "container"
type: "raw"
- all_containers
- hosts
- network:
ip_from_q: "tunnel"
type: "vxlan"
range: "1:1000"
net_name: "vxlan"
- neutron_
- network:
type: "flat"
net_name: "flat"
- neutron_
- network:
type: "vlan"
range: "100:500"
net_name: "vlan"
- neutron_
- network:
ip_from_q: "storage"
type: "raw"
- glance_api
- cinder_api
- cinder_volume
- nova_compute
###
### Infrastructure
###
# galera, memcache, rabbitmq, utility
shared-infra_hosts:
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# repository (apt cache, python packages, etc)
repo-infra_hosts:
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# rsyslog server
log_hosts:
cloudsrv002:
ip: 192.168.0.12
###
### OpenStack
###
# keystone
identity_hosts:
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# cinder api services
storage-
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
cloudsrv002:
ip: 192.168.0.12
container_vars:
- server: "192.168.0.14"
type: "nfs"
options: "_netdev,auto"
cloudsrv003:
ip: 192.168.0.13
container_vars:
- server: "192.168.0.14"
type: "nfs"
options: "_netdev,auto"
cloudsrv004:
ip: 192.168.0.14
container_vars:
- server: "192.168.0.14"
type: "nfs"
options: "_netdev,auto"
# nova api, conductor, etc services
compute-
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# heat
orchestration_
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# horizon
dashboard_hosts:
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# neutron server, agents (L3, etc)
network_hosts:
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# ceilometer (telemetry API)
metering-
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# aodh (telemetry alarm service)
metering-
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# gnocchi (telemetry metrics storage)
metrics_hosts:
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# nova hypervisors
compute_hosts:
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# ceilometer compute agent (telemetry)
metering-
cloudsrv002:
ip: 192.168.0.12
cloudsrv003:
ip: 192.168.0.13
cloudsrv004:
ip: 192.168.0.14
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
cloudsrv002:
ip: 192.168.0.12
container_vars:
cinder_
nfs_volume:
shares:
- {ip: "192.168.0.12", share: "/cinder-volume"}
- {ip: "192.168.0.13", share: "/cinder-volume"}
cloudsrv003:
ip: 192.168.0.13
container_vars:
cinder_
nfs_volume:
shares:
- {ip: "192.168.0.12", share: "/cinder-volume"}
- {ip: "192.168.0.13", share: "/cinder-volume"}
cloudsrv004:
ip: 192.168.0.14
container_vars:
cinder_
nfs_volume:
shares:
- {ip: "192.168.0.12", share: "/cinder-volume"}
- {ip: "192.168.0.13", share: "/cinder-volume"}
What version are you running? Is that master?
Was there any task that failed? Could you post a run log?
Thank you in advance.