Snapshot share data syncs with the source share

Bug #1733286 reported by haobing1
This bug affects 1 person
Affects: OpenStack Shared File Systems Service (Manila)
Status: Won't Fix
Importance: Undecided
Assigned to: Unassigned

Bug Description

Environment configuration:
[root@node-5 ~]# cat /etc/manila/manila.conf | grep -v ^$ | grep -v ^#
[DEFAULT]
api_paste_config = /etc/manila/api-paste.ini
state_path = /var/lib/manila
os_region_name = RegionOne
storage_availability_zone = nova
rootwrap_config = /etc/manila/rootwrap.conf
auth_strategy = keystone
enabled_share_backends = london
network_api_class = manila.network.neutron.neutron_network_plugin.NeutronNetworkPlugin
osapi_share_listen = 0.0.0.0
log_dir = /var/log/manila
rpc_backend = rabbit
control_exchange = openstack
nova_admin_auth_url=http://192.168.90.2:5000/v2.0
notification_driver=messaging
nova_admin_tenant_name=services
nova_admin_username=nova
nova_admin_password=T94BK4z7
nova_catalog_info=compute:nova:publicURL
nova_api_insecure=False
neutron_api_insecure=False
neutron_auth_strategy=keystone
neutron_admin_tenant_name=services
neutron_url=http://192.168.90.2:9696
neutron_region_name=RegionOne
neutron_admin_password=VAcDV7qo
cinder_catalog_info=volume:cinder:publicURL
cinder_admin_username=cinder
cinder_admin_password=LZT42RyU
cinder_cross_az_attach=True
cinder_api_insecure=False
cinder_admin_auth_url=http://192.168.90.2:5000/v2.0
cinder_http_retries=3
cinder_admin_tenant_name=services
neutron_admin_username=neutron
neutron_admin_auth_url=http://192.168.90.2:5000/v2.0
nova_catalog_admin_info=compute:nova:adminURL
neutron_url_timeout=300
quota_gigabytes = 0
quota_snapshots = 0
quota_shares = 0
quota_share_networks = 0
[cinder]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://manila:rsUy5k9T@192.168.90.2/manila
[keystone_authtoken]
auth_uri = http://192.168.90.2:35357/v3
auth_version = v3.0
signing_dir = /tmp/keystone-signing-manila
admin_user=manila
admin_tenant_name=services
auth_port=35357
auth_protocol=http
admin_password=WQszVZ8K
auth_host=192.168.90.2
[matchmaker_redis]
[neutron]
[nova]
[oslo_concurrency]
lock_path = /tmp/manila/manila_locks
[oslo_messaging_amqp]
server_request_prefix = exclusive
broadcast_prefix = broadcast
group_request_prefix = unicast
container_name = guest
idle_timeout = 0
trace = False
allow_insecure_clients = False
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
amqp_durable_queues = False
rabbit_hosts = 192.168.90.2:5672
rabbit_use_ssl = False
rabbit_userid = nova
rabbit_password = OVCCYQPg
rabbit_virtual_host = /
rabbit_ha_queues = False
[oslo_middleware]
[oslo_policy]
[london]
share_mount_path=/shares
max_time_to_attach=120
automatic_share_server_cleanup=True
delete_share_server_with_last_share=False
share_helpers=CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess,NFS=manila.share.drivers.helpers.NFSHelper
smb_template_config_path=$state_path/smb.conf
share_volume_fstype=ext4
unmanage_remove_access_rules=False
share_backend_name=london
volume_name_template=manila-share-%s
driver_handles_share_servers=True
max_time_to_create_volume=180
share_driver=manila.share.drivers.generic.GenericShareDriver
service_instance_smb_config_path=$share_mount_path/smb.conf
volume_snapshot_name_template=manila-snapshot-%s
manila_service_keypair_name=manila-service
max_time_to_build_instance=300
service_instance_name_template=manila_service_instance_%s
interface_driver=manila.network.linux.interface.OVSInterfaceDriver
service_network_cidr=10.254.0.0/16
path_to_public_key=/root/.ssh/id_rsa.pub
service_network_name=manila_service_network
path_to_private_key=/root/.ssh/id_rsa
service_instance_user=manila
connect_share_server_to_tenant_network=False
service_instance_network_helper_type=neutron
service_instance_security_group=manila-service
service_instance_flavor_id=1
service_instance_password=manila
service_image_name=manila-service-image
service_network_division_mask=28

The cinder.conf:
[root@node-2 home]# cat /etc/cinder/cinder.conf | grep -v ^# | grep -v ^$
[DEFAULT]
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = backups
backup_ceph_chunk_size = 134217728
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
glance_api_servers = 192.168.10.3:9292
glance_api_version = 2
enable_v1_api = True
enable_v2_api = True
enable_v3_api = True
api_rate_limit = False
host = cinder
storage_availability_zone = nova
default_availability_zone = nova
auth_strategy = keystone
enabled_backends = ceph
osapi_volume_listen = 0.0.0.0
osapi_volume_workers = 2
os_region_name = RegionOne
rbd_flatten_volume_from_snapshot = true
backup_driver = cinder.backup.drivers.ceph
quota_volumes = 10
quota_snapshots = 10
quota_gigabytes = 1000
nova_catalog_info = compute:Compute Service:publicURL
nova_catalog_admin_info = compute:Compute Service:adminURL
debug = false
verbose = True
log_dir = /var/log/cinder
use_syslog = True
syslog_log_facility = LOG_LOCAL3
rpc_backend = rabbit
control_exchange = cinder
api_paste_config = /etc/cinder/api-paste.ini
use_syslog_rfc_format=True
[BACKEND]
[BRCD_FABRIC_EXAMPLE]
[CISCO_FABRIC_EXAMPLE]
[COORDINATION]
[FC-ZONE-MANAGER]
[KEYMGR]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://cinder:XoWmau5S@127.0.0.1/cinder?charset=utf8&
idle_timeout = 3600
max_pool_size = 10
max_retries = -1
max_overflow = 10
[keystone_authtoken]
auth_uri = http://192.168.10.3:5000/v3
auth_version = v3.0
signing_dir = /tmp/keystone-signing-cinder
admin_password=MbRJTrSj
admin_tenant_name=services
identity_uri=http://192.168.10.3:35357
admin_user=cinder
signing_dirname=/tmp/keystone-signing-cinder
[matchmaker_redis]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
amqp_durable_queues = False
kombu_reconnect_delay = 5.0
rabbit_hosts = 192.168.10.3:5672
rabbit_use_ssl = False
rabbit_userid = nova
rabbit_password = we3nX7wi
rabbit_virtual_host = /
rabbit_ha_queues = False
heartbeat_timeout_threshold = 0
heartbeat_rate = 2
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[ssl]
[ceph]
volume_backend_name=ceph
rbd_flatten_volume_from_snapshot = True
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid=a5d0dd94-57c4-ae55-ffe0-7e3732a24455
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_pool=volumes
rbd_user=volumes

Bug reproduction:
1. Create a share (the create command is sketched after the output):
[root@node-2 home]# manila show 295ca2f7-ba23-44f3-bacb-7abbf9e21653
+-----------------------------+-----------------------------------------------------------------------+
| Property | Value |
+-----------------------------+-----------------------------------------------------------------------+
| status | available |
| share_type_name | default_share_type |
| description | None |
| availability_zone | nova |
| share_network_id | 07010f37-c6c2-44b2-ae82-50fd783fd8bd |
| export_locations | |
| | path = 10.254.0.14:/shares/share-d7fe697f-a017-47dd-8888-69591635ec8e |
| | preferred = False |
| | is_admin_only = False |
| | id = 16926e5f-d425-4fd5-9fce-242bc1fd169d |
| | share_instance_id = d7fe697f-a017-47dd-8888-69591635ec8e |
| share_server_id | 32c3ac58-a027-4611-8fbb-0cfd45f02887 |
| host | node-2.domain.tld@london#london |
| access_rules_status | active |
| snapshot_id | None |
| is_public | False |
| task_state | None |
| snapshot_support | True |
| id | 295ca2f7-ba23-44f3-bacb-7abbf9e21653 |
| size | 1 |
| name | hb |
| share_type | 52417b1d-ca28-47ab-a778-9214cc774de1 |
| has_replicas | False |
| replication_type | None |
| created_at | 2017-11-20T04:31:18.000000 |
| share_proto | NFS |
| consistency_group_id | None |
| source_cgsnapshot_member_id | None |
| project_id | 310631233ce84963a2124633cc584058 |
| metadata | {} |
+-----------------------------+-----------------------------------------------------------------------+
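A share like the one above is normally created with the manila CLI; a minimal sketch, with the name, share type, and share network taken from the output above:

[root@node-2 home]# manila create NFS 1 --name hb \
    --share-type default_share_type \
    --share-network 07010f37-c6c2-44b2-ae82-50fd783fd8bd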
2. Add access for the 192.168.111.19 VM (the access-allow commands are sketched after the listing):
[root@node-2 home]# manila access-list 295ca2f7-ba23-44f3-bacb-7abbf9e21653
+--------------------------------------+-------------+----------------+--------------+--------+
| id | access_type | access_to | access_level | state |
+--------------------------------------+-------------+----------------+--------------+--------+
| 0240bce6-31b8-40e1-aef2-0d2c6aee4db7 | ip | 192.168.111.20 | rw | active |
| 20633655-1366-4936-9f9f-120289ee2b82 | ip | 192.168.111.19 | rw | active |
+--------------------------------------+-------------+----------------+--------------+--------+
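The two rules above correspond to manila access-allow calls; a sketch, reconstructed from the access list:

[root@node-2 home]# manila access-allow 295ca2f7-ba23-44f3-bacb-7abbf9e21653 ip 192.168.111.20 --access-level rw
[root@node-2 home]# manila access-allow 295ca2f7-ba23-44f3-bacb-7abbf9e21653 ip 192.168.111.19 --access-level rw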

3. Mount the share in the 192.168.111.19 VM.

4. Create two files, 'aaa' and 'bbb', in the mounted NFS share directory (steps 3-4 are sketched below).
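A minimal sketch of steps 3 and 4, run inside the 192.168.111.19 VM (the mount point /mnt/hb is an assumption; the export path is from the share shown in step 1):

mkdir -p /mnt/hb
mount -t nfs 10.254.0.14:/shares/share-d7fe697f-a017-47dd-8888-69591635ec8e /mnt/hb
touch /mnt/hb/aaa /mnt/hb/bbb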

5. Create a snapshot of the share (the snapshot-create command is sketched after the output):
[root@node-2 home]# manila snapshot-show 644e8346-b871-4762-a9e3-c23a709ba512
+-------------------+--------------------------------------+
| Property | Value |
+-------------------+--------------------------------------+
| status | available |
| share_id | 295ca2f7-ba23-44f3-bacb-7abbf9e21653 |
| description | None |
| created_at | 2017-11-20T05:17:28.000000 |
| share_proto | NFS |
| provider_location | e7914f82-82cd-4149-8d79-4a52d519b301 |
| id | 644e8346-b871-4762-a9e3-c23a709ba512 |
| size | 1 |
| share_size | 1 |
| name | hb_snap |
+-------------------+--------------------------------------+
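The snapshot above would be created with something along these lines (a sketch; the name is taken from the output above):

[root@node-2 home]# manila snapshot-create 295ca2f7-ba23-44f3-bacb-7abbf9e21653 --name hb_snap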

6. Create a share from the snapshot (sketched after the output):
[root@node-2 home]# manila show fc745b62-d9c1-4d2c-9957-47437e276f13
+-----------------------------+-----------------------------------------------------------------------+
| Property | Value |
+-----------------------------+-----------------------------------------------------------------------+
| status | available |
| share_type_name | default_share_type |
| description | Share from a snapshot. |
| availability_zone | nova |
| share_network_id | 07010f37-c6c2-44b2-ae82-50fd783fd8bd |
| export_locations | |
| | path = 10.254.0.14:/shares/share-2cc3de10-290b-4acb-bd5a-1c077a869ac3 |
| | preferred = False |
| | is_admin_only = False |
| | id = b3e4ea66-d958-428a-937a-3dbc333b716a |
| | share_instance_id = 2cc3de10-290b-4acb-bd5a-1c077a869ac3 |
| share_server_id | 32c3ac58-a027-4611-8fbb-0cfd45f02887 |
| host | node-2.domain.tld@london#london |
| access_rules_status | active |
| snapshot_id | 644e8346-b871-4762-a9e3-c23a709ba512 |
| is_public | False |
| task_state | None |
| snapshot_support | True |
| id | fc745b62-d9c1-4d2c-9957-47437e276f13 |
| size | 1 |
| name | from_snap |
| share_type | 52417b1d-ca28-47ab-a778-9214cc774de1 |
| has_replicas | False |
| replication_type | None |
| created_at | 2017-11-20T05:20:13.000000 |
| share_proto | NFS |
| consistency_group_id | None |
| source_cgsnapshot_member_id | None |
| project_id | 310631233ce84963a2124633cc584058 |
| metadata | {u'source': u'snapshot'} |
+-----------------------------+-----------------------------------------------------------------------+
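Creating a share from a snapshot is done by passing --snapshot-id to manila create; a sketch, with the snapshot ID taken from the output above:

[root@node-2 home]# manila create NFS 1 --name from_snap \
    --snapshot-id 644e8346-b871-4762-a9e3-c23a709ba512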
7. Add access for the snapshot share to the 192.168.111.19 VM as well:
[root@node-2 home]# manila access-list fc745b62-d9c1-4d2c-9957-47437e276f13
+--------------------------------------+-------------+----------------+--------------+--------+
| id | access_type | access_to | access_level | state |
+--------------------------------------+-------------+----------------+--------------+--------+
| 6989bd82-cd1e-4e53-90df-be37a617a550 | ip | 192.168.111.20 | rw | active |
| b007740c-d01c-49f8-a063-633f8f0f78b0 | ip | 192.168.111.19 | rw | active |
+--------------------------------------+-------------+----------------+--------------+--------+

8. Mount the snapshot share on another directory in the same 192.168.111.19 VM (steps 8-10 are sketched after step 11).

9. Remove the 'aaa' file from the source share's NFS directory in the VM.

10. Now the 'aaa' file has also disappeared from the snapshot share's NFS directory in the 192.168.111.19 VM.
    This is incorrect: the snapshot share should be independent, so deleting 'aaa' from the source share should not remove it from the snapshot share.

11. However, on the NFS server VM itself, the snapshot share's backing directory still contains both 'aaa' and 'bbb', which is correct.
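Put together, steps 8-10 look roughly like this inside the 192.168.111.19 VM (a sketch; /mnt/hb and /mnt/from_snap are assumed mount points, the export path is from the share shown in step 6):

mkdir -p /mnt/from_snap
mount -t nfs 10.254.0.14:/shares/share-2cc3de10-290b-4acb-bd5a-1c077a869ac3 /mnt/from_snap
rm /mnt/hb/aaa        # delete 'aaa' from the source share only
ls /mnt/from_snap     # 'aaa' is unexpectedly missing here as well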

12. cat /etc/exports on the NFS server VM:
10.254.0.14:/shares/share-d7fe697f-a017-47dd-8888-69591635ec8e 192.168.111.19(rw,sync,wdelay,hide,nocrossmnt,insecure,no_root-squash,no_all_squash,no_subtree_check,secure_locks,acl,anonuid=65534,anongid=65534,sec=sys,rw,no_root_squash,no_all_squash)
10.254.0.14:/shares/share-9ef8702a-6b38-4ce3-bfa9-7df4a9f0e209 192.168.111.19(rw,sync,wdelay,hide,nocrossmnt,insecure,no_root-squash,no_all_squash,no_subtree_check,secure_locks,acl,anonuid=65534,anongid=65534,sec=sys,rw,no_root_squash,no_all_squash)

If the original share's export is listed first, clients see the original share in place of the new share created from the snapshot; if the snapshot share's export is listed first, clients see the snapshot share in place of the original share.
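One way to compare /etc/exports with what the kernel is actually exporting on the NFS server VM is the following sketch:

exportfs -v              # show the live kernel export table with options
showmount -e localhost   # list exports as an NFS client would see them
exportfs -ra             # re-sync the kernel export table with /etc/exports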

13. The share server OS: Ubuntu 14.04.2 LTS.

Tom Barron (tpb) wrote :

Thank you for the detailed report.

Note that the upstream mitaka branch is EOL and I also believe service images based on Ubuntu 14.04.2 LTS are stale. It is possible that there are downstream distributions that would still fix this, but I suspect that upstream the question will be whether this issue is reproducible on master or a supported stable branch using the current reference image available here:

https://tarballs.openstack.org/manila-image-elements/images/manila-service-image-master.qcow2

From our IRC conversation it was clear that /etc/exports and the kernel export state on your server were in sync, so on the face of it the problem you describe looks like an OS bug: manila set up the exports correctly (and refreshed the kernel state to reflect the update), but in fact only the first share was being exported, and it was exported in place of the second share.
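For anyone re-testing, picking up that image and pointing the generic driver at it would look roughly like this (a sketch; the image name must match service_image_name in the backend section, here assumed to remain manila-service-image):

wget https://tarballs.openstack.org/manila-image-elements/images/manila-service-image-master.qcow2
openstack image create --disk-format qcow2 --container-format bare \
    --public --file manila-service-image-master.qcow2 manila-service-image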

shuaili.wang (shuaili.wang) wrote :

Using the current reference image
https://tarballs.openstack.org/manila-image-elements/images/manila-service-image-master.qcow2
the problem described in this bug still occurs; that is,
if the original share's export is listed first, clients see the original share in place of the new share created from the snapshot, and if the snapshot share's export is listed first, clients see the snapshot share in place of the original share.

Tom Barron (tpb) wrote :

From irc manila community meeting 15 February:

<bswartz> This is generic driver specific
<bswartz> I think there are caching/timing issues with how it does snapshots
<bswartz> We just take a cinder snapshot and assume that the right thing happens
<bswartz> If there's unwritten data in the service VM's cache though, it can get missed by the snapshot
<dustins> bswartz: I guess it depends on how driven we are to fix this thing with the Generic driver
<bswartz> The correct behavior would probably be to flush the write caches of the service VM before taking the cinder snapshot

Tom Barron (tpb) wrote :

more:

<bswartz> Yeah given that it's probably not easy to reproduce this problem, it would be tough to test a fix
<bswartz> But the fix could be as easy as SSH to service VM and invoke "sync" before cinder snapshot
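A manual equivalent of that suggestion would be to flush the service VM's caches just before asking Manila for the snapshot; a sketch, using the service VM address from the export location and the service_instance_user / path_to_private_key values from the [london] section (the snapshot name is hypothetical):

# flush dirty pages on the service VM that serves the share
ssh -i /root/.ssh/id_rsa manila@10.254.0.14 sync
# then take the snapshot as usual
manila snapshot-create 295ca2f7-ba23-44f3-bacb-7abbf9e21653 --name hb_snap2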

Jason Grosso (jgrosso) wrote :

Hey Shuaili, is this still an issue you are running into?

Jason Grosso (jgrosso) wrote :

Has anyone been able to take a closer look at this issue and try to reproduce it?

Jason Grosso (jgrosso) wrote :

Closing this issue; please re-open if you are still running into it.

Changed in manila:
status: New → Won't Fix