nova-console-container: spice not installing correctly

Bug #1723029 reported by PerToft
This bug affects 2 people
Affects: OpenStack-Ansible
Status: Expired
Importance: Undecided
Assigned to: Unassigned
Milestone: (none)

Bug Description

The nova console container is not provisioned correctly and the SPICE proxy is not running; only sshd is listening inside the container:

root@cloudsrv002-nova-console-container-324fd301:~# netstat -lanp | grep LISTEN
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 194/sshd
tcp6 0 0 :::22 :::* LISTEN 194/sshd
unix 2 [ ACC ] STREAM LISTENING 3258652 1/init /run/systemd/private
unix 2 [ ACC ] STREAM LISTENING 3258657 1/init /run/systemd/journal/stdout
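
A few checks that would show whether the SPICE proxy is present in the container at all. This is only a diagnostic sketch: the unit name nova-spicehtml5proxy is taken from upstream nova, and the port from html5proxy_base_url in the config below; both may differ in this deployment.

# Is any spice proxy unit defined in the container?
systemctl list-units --all | grep -i spice
# If a unit exists, is it running or failed?
systemctl status nova-spicehtml5proxy
# html5proxy_base_url below points at port 6082; nothing is listening on it in the netstat output above.
ss -ltnp | grep 6082
# Recent service logs, if the unit exists at all.
journalctl -u nova-spicehtml5proxy --no-pager | tail -n 50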

root@cloudsrv002-nova-console-container-324fd301:~# cat /etc/nova/nova.conf
# Ansible managed

[DEFAULT]
# Disable stderr logging
use_stderr = False
# Logs / State
debug = False
fatal_deprecations = False
log_dir = /var/log/nova
state_path = /var/lib/nova
rootwrap_config = /etc/nova/rootwrap.conf
service_down_time = 120
default_schedule_zone = nova

# Scheduler
cpu_allocation_ratio = 2.0
disk_allocation_ratio = 1.0
ram_allocation_ratio = 1.0
reserved_host_disk_mb = 0
reserved_host_memory_mb = 2048

# Compute
compute_driver = libvirt.LibvirtDriver
instance_name_template = instance-%08x
instances_path = /var/lib/nova/instances
allow_resize_to_same_host = True
image_cache_manager_interval = 0
resume_guests_state_on_host_boot = False

# Api's
enabled_apis = osapi_compute,metadata
osapi_compute_workers = 12

# Rpc all
transport_url = rabbit://nova:xxxxx@192.168.1.172:5671,nova:xxxxx@192.168.4.106:5671,nova:xxxx@192.168.7.200:5671//nova
executor_thread_pool_size = 64
rpc_response_timeout = 60

# Metadata
metadata_host = 192.168.0.250
metadata_port = 8775
metadata_workers = 12

# Network
dhcp_domain = openstacklocal
force_dhcp_release = True
dhcpbridge_flagfile = /etc/nova/nova.conf
firewall_driver = nova.virt.firewall.NoopFirewallDriver
my_ip = 192.168.6.193
default_floating_pool = public
use_neutron = True

## Vif
linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
libvirt_vif_type = ethernet
vif_plugging_timeout = 10
vif_plugging_is_fatal = False

# Hypervisor
default_ephemeral_format = ext4

# Configdrive
force_config_drive = False

# Policy
max_age = 0

# Ceilometer notification configurations
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state

# Notifications
[oslo_messaging_notifications]
notification_topics = notifications
driver = messagingv2
transport_url = rabbit://nova:xxxx@192.168.1.172:5671,nova:xxx@192.168.4.106:5671,nova:xxx@192.168.7.200:5671//nova
# Cache
[cache]
enabled = true
backend = oslo_cache.memcache_pool
memcache_servers = 192.168.13.119:11211,192.168.4.143:11211,192.168.5.173:11211

# Cinder
[cinder]
catalog_info = volumev2:cinderv2:internalURL
cross_az_attach = True
os_region_name = RegionOne

[spice]
agent_enabled = True
enabled = True
# Console Url and binds
html5proxy_base_url = https://10.207.134.250:6082/spice_auto.html
server_listen = 192.168.6.193
server_proxyclient_address = 192.168.6.193

[vnc]
enabled = False

# Glance
[glance]
api_servers = http://192.168.0.250:9292

# Neutron
[neutron]
url = http://192.168.0.250:9696
region_name = RegionOne
auth_type = password
# Keystone client plugin password option
password = xxx
# Keystone client plugin username option
username = neutron
project_name = service
user_domain_name = Default
project_domain_name = Default
# Keystone client plugin authentication URL option
auth_url = http://192.168.0.250:35357/v3
insecure = False
metadata_proxy_shared_secret = xxx
service_metadata_proxy = True

# Placement
[placement]
os_region_name = RegionOne
os_interface = internal
auth_type = "password"
password = xxx
username = placement
project_name = service
user_domain_name = Default
project_domain_name = Default
auth_url = http://192.168.0.250:35357/v3
insecure = False

[conductor]
workers = 12

[keystone_authtoken]
insecure = False
auth_type = password
auth_url = http://192.168.0.250:35357
auth_uri = http://192.168.0.250:5000
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = 53613c30be6dfca58143320
region_name = RegionOne

memcached_servers = 192.168.13.119:11211,192.168.4.143:11211,192.168.5.173:11211

token_cache_time = 300

# if your memcached server is shared, use these settings to avoid cache poisoning
memcache_security_strategy = ENCRYPT
memcache_secret_key = xxxx

[database]
connection = mysql+pymysql://xxx@192.168.0.250/nova?charset=utf8
max_overflow = 10
max_pool_size = 120
pool_timeout = 30

[api_database]
connection = mysql+pymysql://nova_api:xxx@192.168.0.250/nova_api?charset=utf8
max_overflow = 10
max_pool_size = 120
pool_timeout = 30

[placement_database]
connection = mysql+pymysql://nova_placement:xxx@192.168.0.250/nova_placement?charset=utf8
max_overflow = 10
max_pool_size = 120
pool_timeout = 30

[oslo_concurrency]
lock_path = /var/lock/nova

[oslo_messaging_rabbit]
rabbit_use_ssl = True
rpc_conn_pool_size = 30

[libvirt]
inject_partition = -2
inject_password = False
inject_key = False
use_virtio_for_bridges = True
cpu_mode = none
virt_type = qemu
remove_unused_resized_minimum_age_seconds = 3600

live_migration_uri = "qemu+ssh://nova@%s/system?no_verify=1&keyfile=/var/lib/nova/.ssh/id_rsa"
live_migration_tunnelled = True
hw_disk_discard = ignore
disk_cachemodes =

[wsgi]
api_paste_config = /etc/nova/api-paste.ini
secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO

[api]
auth_strategy = keystone
enable_instance_password = True
use_forwarded_for = False

[scheduler]
max_attempts = 5
scheduler_driver = filter_scheduler
periodic_task_interval = 60
host_manager = host_manager
discover_hosts_in_cells_interval = 60

[filter_scheduler]
max_instances_per_host = 50
max_io_ops_per_host = 10
ram_weight_multiplier = 5.0
available_filters = nova.scheduler.filters.all_filters
enabled_filters = RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,AggregateCoreFilter,AggregateDiskFilter
host_subset_size = 10
weight_classes = nova.scheduler.weights.all_weighers
use_baremetal_filters = False
tracks_instance_changes = True

[quota]
cores = 20
fixed_ips = -1
floating_ips = 10
injected_file_content_bytes = 10240
injected_file_path_length = 255
injected_files = 5
instances = 10
key_pairs = 100
metadata_items = 128
ram = 51200
security_group_rules = 20
security_groups = 10
server_group_members = 10
server_groups = 10

[upgrade_levels]
compute=auto
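
Given the [spice] section in the config above, a running proxy should open the console port referenced by html5proxy_base_url (6082) on server_listen (192.168.6.193). A quick reachability sketch, with the IPs copied from the config and the flags only as an example:

# Probe the HTML5 console page through the external VIP (-k skips TLS verification).
curl -kIs https://10.207.134.250:6082/spice_auto.html
# Inside the console container, check for the local listener the proxy should open.
ss -ltn | grep 6082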

root@CloudSRV001:~# cat /etc/openstack_deploy/openstack_user_config.yml
cidr_networks:
  container: 192.168.0.0/20
  tunnel: 192.168.32.0/20
  storage: 192.168.16.0/20
  snet: 10.207.134.0/24

used_ips:
  - "192.168.0.1,192.168.0.20"
  - "192.168.0.254"
  - "192.168.16.1,192.168.16.254"
  - "192.168.32.1,192.168.32.254"
  - "192.168.48.1,192.168.48.254"
  - "10.207.134.1,10.207.134.192"

global_overrides:
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  external_lb_vip_address: "10.207.134.250"
  internal_lb_vip_address: "192.168.0.250"

  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eno2"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "100:500"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute

###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# rsyslog server
log_hosts:
  cloudsrv002:
    ip: 192.168.0.12

###
### OpenStack
###

# keystone
identity_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# cinder api services
storage-infra_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  cloudsrv002:
    ip: 192.168.0.12
    container_vars:
       limit_container_types: glance
       glance_nfs_client:
         - server: "192.168.0.14"
           remote_path: "/mnt/glance-storage"
           local_path: "/var/lib/glance/images"
           type: "nfs"
           options: "_netdev,auto"
  cloudsrv003:
    ip: 192.168.0.13
    container_vars:
       limit_container_types: glance
       glance_nfs_client:
         - server: "192.168.0.14"
           remote_path: "/mnt/glance-storage"
           local_path: "/var/lib/glance/images"
           type: "nfs"
           options: "_netdev,auto"
  cloudsrv004:
    ip: 192.168.0.14
    container_vars:
       limit_container_types: glance
       glance_nfs_client:
         - server: "192.168.0.14"
           remote_path: "/mnt/glance-storage"
           local_path: "/var/lib/glance/images"
           type: "nfs"
           options: "_netdev,auto"

# nova api, conductor, etc services
compute-infra_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# heat
orchestration_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# horizon
dashboard_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# neutron server, agents (L3, etc)
network_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# ceilometer (telemetry API)
metering-infra_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# aodh (telemetry alarm service)
metering-alarm_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# gnocchi (telemetry metrics storage)
metrics_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# nova hypervisors
compute_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# ceilometer compute agent (telemetry)
metering-compute_hosts:
  cloudsrv002:
    ip: 192.168.0.12
  cloudsrv003:
    ip: 192.168.0.13
  cloudsrv004:
    ip: 192.168.0.14

# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  cloudsrv002:
    ip: 192.168.0.12
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_CLOUDSRV002
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - {ip: "192.168.0.12", share: "/cinder-volume"}
            - {ip: "192.168.0.13", share: "/cinder-volume"}
  cloudsrv003:
    ip: 192.168.0.13
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_CLOUDSRV002
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - {ip: "192.168.0.12", share: "/cinder-volume"}
            - {ip: "192.168.0.13", share: "/cinder-volume"}
  cloudsrv004:
    ip: 192.168.0.14
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_CLOUDSRV002
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - {ip: "192.168.0.12", share: "/cinder-volume"}
            - {ip: "192.168.0.13", share: "/cinder-volume"}
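
Not included above is the user_variables.yml that selects the console type. As a hedged sketch only: in the openstack-ansible os_nova role of this era the console flavour is chosen with nova_console_type (novnc or spice); the variable name and playbook path below should be verified against the deployed release.

# Assumption: the deployment intends SPICE, selected via the os_nova role variable.
grep -R "nova_console_type" /etc/openstack_deploy/ || true
cat >> /etc/openstack_deploy/user_variables.yml <<'EOF'
nova_console_type: spice
EOF
# Re-run the nova playbook so the console containers are (re)configured.
openstack-ansible /opt/openstack-ansible/playbooks/os-nova-install.yml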

Revision history for this message
Jean-Philippe Evrard (jean-philippe-evrard) wrote :

What version are you running? Is that master?
Was there any task that failed? Could you post a run log?

Thank you in advance.

Changed in openstack-ansible:
status: New → Incomplete
Revision history for this message
Jean-Philippe Evrard (jean-philippe-evrard) wrote :

On top of that, was spice properly installed in that container?
Could you describe the issue in more detail?

Revision history for this message
Launchpad Janitor (janitor) wrote :

[Expired for openstack-ansible because there has been no activity for 60 days.]

Changed in openstack-ansible:
status: Incomplete → Expired