In convergence, if we fail to acquire the lock on a resource because another traversal is still acting on it, there is a race where we attempt to update the resource with the current traversal ID and any new requirements. If the previous update finishes just after we try to get the lock but before this call, then it will succeed, which would be bad.
In the usual case where the resource is still locked, the call will fail with an UpdateInProgress exception, which is what we want anyway, but there will be an ERROR-level log reported:
Oct 09 16:18:58.527198 ubuntu-xenial-ovh-bhs1-11290899 heat-engine[8340]: ERROR root [None req-743503a9-1b58-4706-a575-0eaea5fc72a0 demo None] Original exception being dropped: ['Traceback (most recent call last):\n', ' File "/opt/stack/new/heat/heat/engine/resource.py", line 1384, in update_convergence\n runner(timeout=timeout, progress_callback=progress_callback)\n', ' File "/opt/stack/new/heat/heat/engine/scheduler.py", line 168, in __call__\n progress_callback=progress_callback):\n', ' File "/opt/stack/new/heat/heat/engine/scheduler.py", line 244, in as_task\n self.start(timeout=timeout)\n', ' File "/opt/stack/new/heat/heat/engine/scheduler.py", line 190, in start\n self.step()\n', ' File "/opt/stack/new/heat/heat/engine/scheduler.py", line 217, in step\n poll_period = next(self._runner)\n', ' File "/opt/stack/new/heat/heat/engine/scheduler.py", line 366, in wrapper\n subtask = next(parent)\n', ' File "/opt/stack/new/heat/heat/engine/resource.py", line 1556, in update\n with self._action_recorder(action, UpdateReplace):\n', ' File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__\n return self.gen.next()\n', ' File "/opt/stack/new/heat/heat/engine/resource.py", line 843, in _action_recorder\n LOG.info(\'Update in progress for %s\', self.name)\n', ' File "/usr/local/lib/python2.7/dist-packages/oslo_utils/excutils.py", line 220, in __exit__\n self.force_reraise()\n', ' File "/usr/local/lib/python2.7/dist-packages/oslo_utils/excutils.py", line 196, in force_reraise\n six.reraise(self.type_, self.value, self.tb)\n', ' File "/opt/stack/new/heat/heat/engine/resource.py", line 839, in _action_recorder\n set_in_progress()\n', ' File "/usr/local/lib/python2.7/dist-packages/tenacity/__init__.py", line 171, in wrapped_f\n return self.call(f, *args, **kw)\n', ' File "/usr/local/lib/python2.7/dist-packages/tenacity/__init__.py", line 248, in call\n start_time=start_time)\n', ' File "/usr/local/lib/python2.7/dist-packages/tenacity/__init__.py", line 216, in iter\n 
raise RetryError(
Oct 09 16:18:58.527838 ubuntu-xenial-ovh-bhs1-11290899 heat-engine[8340]: fut).reraise()\n', ' File "/usr/local/lib/python2.7/dist-packages/tenacity/__init__.py", line 297, in reraise\n raise self.last_attempt.result()\n', ' File "/usr/local/lib/python2.7/dist-packages/concurrent/futures/_base.py", line 422, in result\n return self.__get_result()\n', ' File "/usr/local/lib/python2.7/dist-packages/tenacity/__init__.py", line 251, in call\n result = fn(*args, **kwargs)\n', ' File "/opt/stack/new/heat/heat/engine/resource.py", line 836, in set_in_progress\n self.state_set(action, self.IN_PROGRESS, lock=lock_acquire)\n', ' File "/opt/stack/new/heat/heat/engine/resource.py", line 2206, in state_set\n self.store(set_metadata, lock=lock)\n', ' File "/opt/stack/new/heat/heat/engine/resource.py", line 2001, in store\n self._store_with_lock(rs, lock)\n', ' File "/opt/stack/new/heat/heat/engine/resource.py", line 2032, in _store_with_lock\n raise exception.UpdateInProgress(self.name)\n', 'UpdateInProgress: The resource test2 is already being updated.\n']: UpdateInProgress: The resource test2 is already being updated.
Fix proposed to branch: master
Review: https://review.openstack.org/510674