[un-patched but some logs added charm from quincy/stable rev. 65]
-> config_changed function is called in every update-status hook.
diff --git a/charm.orig/reactive/ceph_fs.py b/charm/reactive/ceph_fs.py
index 8dc9898..b820b36 100644
--- a/charm.orig/reactive/ceph_fs.py
+++ b/charm/reactive/ceph_fs.py
@@ -68,6 +68,8 @@ def config_changed():
                 subprocess.check_call(['sudo', 'systemctl', 'reset-failed', svc])
                 subprocess.check_call(['sudo', 'systemctl', 'restart', svc])
+                ch_core.hookenv.log(f'⚠️ systemctl restart {svc}',
+                                    ch_core.hookenv.WARNING)
except subprocess.CalledProcessError as exc:
# The service can be temporarily masked when booting, so
# skip that class of errors.
[un-patched but some logs added charm from quincy/stable rev. 65]
-> config_changed function is called in every update-status hook.
diff --git a/charm.orig/reactive/ceph_fs.py b/charm/reactive/ceph_fs.py
index 8dc9898..b820b36 100644
--- a/charm.orig/reactive/ceph_fs.py
+++ b/charm/reactive/ceph_fs.py
@@ -68,6 +68,8 @@ def config_changed():
                 subprocess.check_call(['sudo', 'systemctl', 'reset-failed', svc])
                 subprocess.check_call(['sudo', 'systemctl', 'restart', svc])
+                ch_core.hookenv.log(f'⚠️ systemctl restart {svc}',
+                                    ch_core.hookenv.WARNING)
         except subprocess.CalledProcessError as exc:
             # The service can be temporarily masked when booting, so
             # skip that class of errors.
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-fs/0.juju-log tracer> layer_openstack.py:64:default_update_status fs/0.juju-log Invoking reactive handler: reactive/layer_openstack.py:64:default_update_status fs/0.juju-log tracer: set flag run-default-update-status fs/0.juju-log tracer: set flag is-update-status-hook fs/0.juju-log tracer> /ceph-mds/requires.py:31:joined: ceph-mds /ceph-mds/requires.py:35:changed: ceph-mds /tls-certificates/requires.py:117:broken: certificates ceph_fs.py:80:storage_ceph_connected layer_openstack.py:170:default_config_rendered layer_openstack.py:82:check_really_is_update_status layer_openstack.py:93:run_default_update_status fs/0.juju-log Invoking reactive handler: reactive/ceph_fs.py:80:storage_ceph_connected fs/0.juju-log Request already sent but not complete, not sending new request fs/0.juju-log tracer: cleared flag ceph-mds.pools.available fs/0.juju-log Request already sent but not complete, not sending new request fs/0.juju-log Request already sent but not complete, not sending new request fs/0.juju-log Invoking reactive handler: reactive/layer_openstack.py:82:check_really_is_update_status fs/0.juju-log Invoking reactive handler: reactive/layer_openstack.py:93:run_default_update_status fs/0.juju-log tracer> update-status layer_openstack.py:93:run_default_update_status fs/0.juju-log Invoking reactive handler: reactive/layer_openstack.py:170:default_config_rendered fs/0.update-status enabled fs/0.juju-log service ceph-mds@juju-795de1-0-lxd-0 already enabled fs/0.update-status active fs/0.juju-log Invoking reactive handler: hooks/relations/tls-certificates/requires.py:117:broken: certificates fs/0.juju-log Invoking reactive handler: hooks/relations/ceph-mds/requires.py:31:joined: ceph-mds fs/0.juju-log Invoking reactive handler: hooks/relations/ceph-mds/requires.
py:35:changed: ceph-mds fs/0.juju-log changed broker_req: [{'app-name': 'cephfs', 'compression-algorithm': None, 'compression-max-blob-size': None, 'compression-max-blob-size-hdd': None, 'compression-max-blob-size-ssd': None, 'compression-min-blob-size': None, 'compression-min-blob-size-hdd': None, 'compression-min-blob-size-ssd': None, 'compression-mode': None, 'compression-required-ratio': None, 'crush-profile': None, 'group': None, 'group-namespace': None, 'max-bytes': None, 'max-objects': None, 'name': 'ceph-fs_data', 'op': 'create-pool', 'pg_num': None, 'rbd-mirroring-mode': 'pool', 'replicas': 3, 'weight': 4.0}, {'app-name': 'cephfs', 'compression-algorithm': None, 'compression-max-blob-size': None, 'compression-max-blob-size-hdd': None, 'compression-max-blob-size-ssd': None, 'compression-min-blob-size': None, 'compression-min-blob-size-hdd': None, 'compression-min-blob-size-ssd': None, 'compression-mode': None, 'compression-required-ratio': None, 'crush-profile': None, 'group': None, 'group-namespace': None, 'max-bytes': None, 'max-objects': None, 'name': 'ceph-fs_metadata', 'op': 'create-pool', 'pg_num': None, 'rbd-mirroring-mode': 'pool', 'replicas': 3, 'weight': 1.0}, {'data_pool': 'ceph-fs_data', 'extra_pools': [], 'mds_name': 'ceph-fs', 'metadata_pool': 'ceph-fs_metadata', 'op': 'create-cephfs'}] fs/0.juju-log Setting ceph-client.pools.available fs/0.juju-log tracer> pools.available ceph_fs.py:42:config_changed fs/0.juju-log Invoking reactive handler: reactive/ceph_fs.
py:42:config_changed fs/0.update-status creating /var/lib/ceph/mds/ceph-juju-795de1-0-lxd-0/keyring fs/0.update-status added entity mds.juju-795de1-0-lxd-0 auth(key=AQAwZqdmsQVCBxAAq4AI6aPhqFoe8K9/XwcaMQ==) fs/0.juju-log Changing permissions on existing content: 33184 -> 416 fs/0.juju-log ⚠️ systemctl restart <email address hidden> fs/0.juju-log Running _assess_status() fs/0.update-status active fs/0.juju-log tracer> status-hook /ceph-mds/requires.py:31:joined: ceph-mds /ceph-mds/requires.py:35:changed: ceph-mds /tls-certificates/requires.py:117:broken: certificates ceph_fs.py:42:config_changed ceph_fs.py:80:storage_ceph_connected layer_openstack.py:170:default_config_rendered uniter.operation ran "update-status" hook (via explicit, bespoke hook script)
tracer: hooks phase, 1 handlers queued
tracer: ++ queue handler reactive/
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
tracer: main dispatch loop, 7 handlers queued
tracer: ++ queue handler hooks/relations
tracer: ++ queue handler hooks/relations
tracer: ++ queue handler hooks/relations
tracer: ++ queue handler reactive/
tracer: ++ queue handler reactive/
tracer: ++ queue handler reactive/
tracer: ++ queue handler reactive/
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
tracer: cleared flag run-default-
tracer: -- dequeue handler reactive/
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
tracer: set flag ceph-mds.
tracer: ++ queue handler reactive/
unit-ceph-fs-0: 11:57:15 INFO unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:15 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:22 WARNING unit.ceph-
unit-ceph-fs-0: 11:57:22 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:22 DEBUG unit.ceph-
unit-ceph-fs-0: 11:57:22 DEBUG unit.ceph-
tracer: cleared flag is-update-
tracer: ++ queue handler hooks/relations
tracer: ++ queue handler hooks/relations
tracer: ++ queue handler hooks/relations
tracer: ++ queue handler reactive/
tracer: ++ queue handler reactive/
tracer: ++ queue handler reactive/
unit-ceph-fs-0: 11:57:22 INFO juju.worker.