Snapshot share data stays in sync with the source share
Affects | Status | Importance | Assigned to | Milestone | |
---|---|---|---|---|---|
OpenStack Shared File Systems Service (Manila) |
Won't Fix
|
Undecided
|
Unassigned |
Bug Description
environment configuration:
[root@node-5 ~]# cat /etc/manila/
[DEFAULT]
api_paste_config = /etc/manila/
state_path = /var/lib/manila
os_region_name = RegionOne
storage_
rootwrap_config = /etc/manila/
auth_strategy = keystone
enabled_
network_api_class = manila.
osapi_share_listen = 0.0.0.0
log_dir = /var/log/manila
rpc_backend = rabbit
control_exchange = openstack
nova_admin_
notification_
nova_admin_
nova_admin_
nova_admin_
nova_catalog_
nova_api_
neutron_
neutron_
neutron_
neutron_url=http://
neutron_
neutron_
cinder_
cinder_
cinder_
cinder_
cinder_
cinder_
cinder_
cinder_
neutron_
neutron_
nova_catalog_
neutron_
quota_gigabytes = 0
quota_snapshots = 0
quota_shares = 0
quota_share_
[cinder]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql:
[keystone_
auth_uri = http://
auth_version = v3.0
signing_dir = /tmp/keystone-
admin_user=manila
admin_tenant_
auth_port=35357
auth_protocol=http
admin_password=
auth_host=
[matchmaker_redis]
[neutron]
[nova]
[oslo_concurrency]
lock_path = /tmp/manila/
[oslo_messaging
server_
broadcast_prefix = broadcast
group_request_
container_name = guest
idle_timeout = 0
trace = False
allow_insecure_
[oslo_messaging
[oslo_messaging
amqp_durable_queues = False
rabbit_hosts = 192.168.90.2:5672
rabbit_use_ssl = False
rabbit_userid = nova
rabbit_password = OVCCYQPg
rabbit_virtual_host = /
rabbit_ha_queues = False
[oslo_middleware]
[oslo_policy]
[london]
share_mount_
max_time_
automatic_
delete_
share_helpers=
smb_template_
share_volume_
unmanage_
share_backend_
volume_
driver_
max_time_
share_driver=
service_
volume_
manila_
max_time_
service_
interface_
service_
path_to_
service_
path_to_
service_
connect_
service_
service_
service_
service_
service_
service_
the cinder.conf:
[root@node-2 home]# cat /etc/cinder/
[DEFAULT]
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = backups
backup_
backup_ceph_pool = backups
backup_
backup_
glance_api_servers = 192.168.10.3:9292
glance_api_version = 2
enable_v1_api = True
enable_v2_api = True
enable_v3_api = True
api_rate_limit = False
host = cinder
storage_
default_
auth_strategy = keystone
enabled_backends = ceph
osapi_volume_listen = 0.0.0.0
osapi_volume_
os_region_name = RegionOne
rbd_flatten_
backup_driver = cinder.
quota_volumes = 10
quota_snapshots = 10
quota_gigabytes = 1000
nova_catalog_info = compute:Compute Service:publicURL
nova_catalog_
debug = false
verbose = True
log_dir = /var/log/cinder
use_syslog = True
syslog_log_facility = LOG_LOCAL3
rpc_backend = rabbit
control_exchange = cinder
api_paste_config = /etc/cinder/
use_syslog_
[BACKEND]
[BRCD_FABRIC_
[CISCO_
[COORDINATION]
[FC-ZONE-MANAGER]
[KEYMGR]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql:
idle_timeout = 3600
max_pool_size = 10
max_retries = -1
max_overflow = 10
[keystone_
auth_uri = http://
auth_version = v3.0
signing_dir = /tmp/keystone-
admin_password=
admin_tenant_
identity_uri=http://
admin_user=cinder
signing_
[matchmaker_redis]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging
[oslo_messaging
[oslo_messaging
amqp_durable_queues = False
kombu_reconnect
rabbit_hosts = 192.168.10.3:5672
rabbit_use_ssl = False
rabbit_userid = nova
rabbit_password = we3nX7wi
rabbit_virtual_host = /
rabbit_ha_queues = False
heartbeat_
heartbeat_rate = 2
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versioned
[ssl]
[ceph]
volume_
rbd_flatten_
volume_
rbd_secret_
rbd_ceph_
rbd_pool=volumes
rbd_user=volumes
bug reproduction;
1. create a share :
[root@node-2 home]# manila show 295ca2f7-
+------
| Property | Value |
+------
| status | available |
| share_type_name | default_share_type |
| description | None |
| availability_zone | nova |
| share_network_id | 07010f37-
| export_locations | |
| | path = 10.254.
| | preferred = False |
| | is_admin_only = False |
| | id = 16926e5f-
| | share_instance_id = d7fe697f-
| share_server_id | 32c3ac58-
| host | node-2.
| access_rules_status | active |
| snapshot_id | None |
| is_public | False |
| task_state | None |
| snapshot_support | True |
| id | 295ca2f7-
| size | 1 |
| name | hb |
| share_type | 52417b1d-
| has_replicas | False |
| replication_type | None |
| created_at | 2017-11-
| share_proto | NFS |
| consistency_
| source_
| project_id | 310631233ce8496
| metadata | {} |
+------
2.add the access to 192.168.111.19 vm
[root@node-2 home]# manila access-list 295ca2f7-
+------
| id | access_type | access_to | access_level | state |
+------
| 0240bce6-
| 20633655-
+------
3. mount the share in the 192.168.111.19 vm
4. create two file 'aaa', 'bbb' in mount nfs share dir
5.create a snapshot from the share
[root@node-2 home]# manila snapshot-show 644e8346-
+------
| Property | Value |
+------
| status | available |
| share_id | 295ca2f7-
| description | None |
| created_at | 2017-11-
| share_proto | NFS |
| provider_location | e7914f82-
| id | 644e8346-
| size | 1 |
| share_size | 1 |
| name | hb_snap |
+------
6.create a share from the snapshot
[root@node-2 home]# manila show fc745b62-
+------
| Property | Value |
+------
| status | available |
| share_type_name | default_share_type |
| description | Share from a snapshot. |
| availability_zone | nova |
| share_network_id | 07010f37-
| export_locations | |
| | path = 10.254.
| | preferred = False |
| | is_admin_only = False |
| | id = b3e4ea66-
| | share_instance_id = 2cc3de10-
| share_server_id | 32c3ac58-
| host | node-2.
| access_rules_status | active |
| snapshot_id | 644e8346-
| is_public | False |
| task_state | None |
| snapshot_support | True |
| id | fc745b62-
| size | 1 |
| name | from_snap |
| share_type | 52417b1d-
| has_replicas | False |
| replication_type | None |
| created_at | 2017-11-
| share_proto | NFS |
| consistency_
| source_
| project_id | 310631233ce8496
| metadata | {u'source': u'snapshot'} |
+------
7.add the access for snapshot share to 192.168.111.19 vm also
[root@node-2 home]# manila access-list fc745b62-
+------
| id | access_type | access_to | access_level | state |
+------
| 6989bd82-
| b007740c-
+------
8.mount snapshot share on another dir in the same 192.168.111.19 vm
9.rm the 'aaa' file in the vm source share nfs dir
10. Now, we found that the 'aaa' file had also disappeared from the snapshot share's NFS dir in the 192.168.111.19 VM.
This is incorrect: the snapshot share should be independent, so the 'aaa' file should not disappear from it.
11. But, on the NFS server VM, we find that the snapshot share still contains both the 'aaa' and 'bbb' files intact.
12. cat /etc/exports
10.254.
10.254.
If the original share's export appears first in /etc/exports, clients are served the original share instead of the new share created from the snapshot; conversely, if the snapshot share's export appears first, clients are served the snapshot share instead of the original share.
13. the share server os info: ubuntu 14.04.2 LTS
description: | updated |
description: | updated |
description: | updated |
description: | updated |
Thank you for the detailed report.
Note that upstream mitaka branch is EOL and I also believe service images based on ubuntu 14.04.2 LTS are stale. It is possible that there are downstream distributions that would still fix these, but I suspect upstream the question will be whether this issue is re-producible on master or a supported stable branch using the current reference image available here:
https://tarballs.openstack.org/manila-image-elements/images/manila-service-image-master.qcow2
From our IRC conversation it was clear that /etc/exports and the kernel export state on your server were in sync, so on the face of things the problem you describe seems like an OS bug: manila set up the exports correctly (and refreshed the kernel state to reflect the update), but in fact only the first share was being exported, and it was exported in place of the second share.