# split components to 2 groups:
# components that should have pages on all numa nodes (such as dpdk)
@@ -1595,7 +1595,7 @@ class NodeAttributes(object):
         nova_cpus = set(cpu_distribution['components'].get('nova', []))
         numa_values = collections.defaultdict(int)
-        for numa_node in topology['numa_nodes']:
+        for numa_node in topology.get('numa_nodes', {}):
             for cpu in numa_node['cpus']:
                 if cpu in nova_cpus:
                     numa_values[numa_node['id']] += 1
and here
diff --git a/nailgun/nailgun/policy/hugepages_distribution.py b/nailgun/nailgun/policy/hugepages_distribution.py
index 264f52e..d6b4412 100644
--- a/nailgun/nailgun/policy/hugepages_distribution.py
+++ b/nailgun/nailgun/policy/hugepages_distribution.py
@@ -132,7 +132,7 @@ def distribute_hugepages(numa_topology, components, numa_sort_func):
any_comps = [Component(comp) for comp in components['any']]
numa_nodes = []
- for numa_node in numa_topology['numa_nodes']:
+ for numa_node in numa_topology.get('numa_nodes', {}):
# converting memory to KiBs
memory = numa_node['memory'] // 1024
The fix provided does not seem to fix the error in the bug description.
There are at least 3 places where it should be fixed:
objects/node.py:
diff --git a/nailgun/nailgun/objects/node.py b/nailgun/nailgun/objects/node.py
index 8833f7c..fd2b76c 100644
--- a/nailgun/nailgun/objects/node.py
+++ b/nailgun/nailgun/objects/node.py
@@ -1435,7 +1435,7 @@ class NodeAttributes(object):
     @classmethod
     def distribute_node_cpus(cls, node, attributes=None):
-        numa_nodes = node.meta['numa_topology']['numa_nodes']
+        numa_nodes = node.meta.get('numa_topology', {}).get('numa_nodes', [])
         components = cls.node_cpu_pinning_info(node, attributes)['components']
         dpdk_nics = Node.dpdk_nics(node)
@@ -1497,7 +1497,7 @@ class NodeAttributes(object):
         """
         hugepages = collections.defaultdict(int)
-        numa_count = len(node.meta['numa_topology']['numa_nodes'])
+        numa_count = len(node.meta.get('numa_topology', {}).get('numa_nodes', []))
         for name, attrs in six.iteritems(
@@ -1557,7 +1557,7 @@ class NodeAttributes(object):
             return {}
-        numa_nodes_len = len(node.meta['numa_topology']['numa_nodes'])
+        numa_nodes_len = len(node.meta.get('numa_topology', {}).get('numa_nodes', []))
         return {
             'ovs_socket_mem':
@@ -1567,7 +1567,7 @@ class NodeAttributes(object):
     @classmethod
     def distribute_hugepages(cls, node, attributes=None):
         hugepages = cls._safe_get_hugepages(attributes)
-        topology = node.meta['numa_topology']
+        topology = node.meta.get('numa_topology', {})
         # split components to 2 groups:
         # components that should have pages on all numa nodes (such as dpdk)
@@ -1595,7 +1595,7 @@ class NodeAttributes(object):
-        for numa_node in topology['numa_nodes']:
+        for numa_node in topology.get('numa_nodes', {}):
and here
diff --git a/nailgun/nailgun/policy/hugepages_distribution.py b/nailgun/nailgun/policy/hugepages_distribution.py
index 264f52e..d6b4412 100644
--- a/nailgun/nailgun/policy/hugepages_distribution.py
+++ b/nailgun/nailgun/policy/hugepages_distribution.py
@@ -132,7 +132,7 @@ def distribute_hugepages(numa_topology, components, numa_sort_func):
     any_comps = [Component(comp) for comp in components['any']]
     numa_nodes = []
-    for numa_node in numa_topology['numa_nodes']:
+    for numa_node in numa_topology.get('numa_nodes', {}):
         # converting memory to KiBs
         memory = numa_node['memory'] // 1024
@@ -145,8 +145,8 @@ def distribute_hugepages(numa_topology, components, numa_sort_func):
     numa_nodes.sort(key=lambda x: numa_sort_func(x.id))
-    _allocate_all(numa_nodes, all_comps)
-    _allocate_any(numa_nodes, any_comps)
+#    _allocate_all(numa_nodes, all_comps)
+#    _allocate_any(numa_nodes, any_comps)
     return sum([n.report() for n in numa_nodes], [])