diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py
index 6326520..ce3dc20 100644
--- a/test/probe/test_object_handoff.py
+++ b/test/probe/test_object_handoff.py
@@ -344,9 +344,11 @@ class TestECObjectHandoff(ECProbeTest):
         resp_etag = self.get_object(container_name, object_name)
         self.assertEqual(resp_etag, new_contents.etag)

-    def _check_nodes(self, opart, onodes, container_name, object_name):
+    def _check_nodes(self, opart, onodes, container_name, object_name,
+                     headers=None):
         found_frags = defaultdict(int)
         req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
+        req_headers.update(headers or {})
         for node in onodes + list(self.object_ring.get_more_nodes(opart)):
             try:
                 headers = direct_client.direct_head_object(
@@ -460,5 +462,82 @@ class TestECObjectHandoff(ECProbeTest):
         # ... all six unique
         self.assertEqual(len(frag2count), 6)

+    def test_ec_non_durable_read(self):
+        self.container_name = 'container-%s' % uuid4()
+        self.object_name = 'object-%s' % uuid4()
+
+        # create EC container
+        headers = {'X-Storage-Policy': self.policy.name}
+        client.put_container(self.url, self.token, self.container_name,
+                             headers=headers)
+
+        # PUT object
+        contents = Body()
+        etag = client.put_object(self.url, self.token,
+                                 self.container_name, self.object_name,
+                                 contents=contents)
+        self.assertEqual(etag, contents.etag)
+
+        opart, onodes = self.object_ring.get_nodes(
+            self.account, self.container_name, self.object_name)
+
+        # find a primary server that has both of its devices in
+        # the primary node list
+        group_nodes_by_config = defaultdict(list)
+        for n in onodes:
+            group_nodes_by_config[self.config_number(n)].append(n)
+        double_disk_primary_config = double_disk_primary_nodes = None
+        for config_number, node_list in group_nodes_by_config.items():
+            if len(node_list) > 1:
+                double_disk_primary_config, double_disk_primary_nodes = (
+                    config_number, node_list)
+                break
+        if not (double_disk_primary_config and double_disk_primary_nodes):
+            self.fail('EC ring did not disperse to all disks!')
+
+        # ruin the .durable on the *majority* of devices
+        for node in onodes:
+            if node in double_disk_primary_nodes:
+                continue
+            part_dir = self.storage_dir('object', node, part=opart)
+            for dirs, subdirs, files in os.walk(part_dir):
+                for fname in files:
+                    if fname.endswith('#d.data'):
+                        durable = os.path.join(dirs, fname)
+                        os.rename(durable, durable[:-7] + '.data')
+                        break
+            try:
+                os.remove(os.path.join(part_dir, 'hashes.pkl'))
+            except OSError as e:
+                if e.errno != errno.ENOENT:
+                    raise
+
+        # sanity - we have two durable frags
+        found_frags = self._check_nodes(opart, onodes, self.container_name,
+                                        self.object_name)
+        self.assertEqual(len(found_frags), 2)
+        # ... but six total
+        headers = {'X-Backend-Fragment-Preferences': '[]'}
+        found_frags = self._check_nodes(opart, onodes, self.container_name,
+                                        self.object_name, headers=headers)
+        self.assertEqual(len(found_frags), 6)
+
+        # with so many frags it's easy to reason we can still read
+        # this object
+        resp_etag = self.get_object(self.container_name, self.object_name)
+        self.assertEqual(resp_etag, contents.etag)
+
+        # a 4+2 will be robust to temporary unavailability of two nodes
+        Manager(['object-server']).stop(number=double_disk_primary_config)
+        resp_etag = self.get_object(self.container_name, self.object_name)
+        self.assertEqual(resp_etag, contents.etag)
+
+        # after a reconstructor pass all frags are durable
+        Manager(['object-server']).start(number=double_disk_primary_config)
+        Manager(['object-reconstructor']).once()
+        found_frags = self._check_nodes(opart, onodes, self.container_name,
+                                        self.object_name)
+        self.assertEqual(len(found_frags), 6)
+

 if __name__ == '__main__':
     main()