From 05920d566ceaa28aff4460ebb2f30e1b6e51e265 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Tue, 6 Nov 2018 01:03:41 +0100
Subject: [PATCH v1] userns: defer setting up reverse mappings

Currently, the forward map and the reverse map are copied and sorted at
the same time, before the necessary updates to the forward map have been
performed. As a consequence, the forward map receives the necessary
updates while the reverse map does not, leaving it with invalid data.
Specifically, the lower ids of the forward mapping are correctly mapped
to the appropriate kernel ids, while the lower ids of the reverse
mapping are not. This breaks inode_owner_or_capable() and
privileged_wrt_inode_uidgid(), which call helpers that need to access
the reverse mapping. Thus, a process can incorrectly appear to be
capable relative to an inode.

Note that the sorting logic is only triggered when more than five
extents are specified and when user namespaces are nested. Hence, only
containers with complex mappings in nested user namespaces are
affected.

This patch ensures that the translation happens for both the forward
and the reverse mapping. First, the forward mapping is sorted and its
lower ids are translated into kernel ids. After this, the forward
mapping is copied into the reverse mapping and the reverse mapping is
sorted.

This is CVE-2018-18955.

Fixes: 6397fac4915a ("userns: bump idmap limits to 340")
Cc: stable@vger.kernel.org
Signed-off-by: Jann Horn
Signed-off-by: Christian Brauner
---
 kernel/user_namespace.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index e5222b5fb4fe..bfcba3f7b226 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -820,18 +820,20 @@ static int cmp_extents_reverse(const void *a, const void *b)
 	return 0;
 }
 
-/**
- * sort_idmaps - Sorts an array of idmap entries.
- * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
- */
-static int sort_idmaps(struct uid_gid_map *map)
+static inline void sort_idmaps_forward(struct uid_gid_map *map)
 {
 	if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
-		return 0;
+		return;
 
 	/* Sort forward array. */
 	sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
 	     cmp_extents_forward, NULL);
+}
+
+static int dup_and_sort_idmaps_reverse(struct uid_gid_map *map)
+{
+	if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
+		return 0;
 
 	/* Only copy the memory from forward we actually need. */
 	map->reverse = kmemdup(map->forward,
@@ -974,13 +976,11 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
 		goto out;
 
-	ret = sort_idmaps(&new_map);
-	if (ret < 0)
-		goto out;
+	/* Sort forward array. */
+	sort_idmaps_forward(&new_map);
 
-	ret = -EPERM;
-	/* Map the lower ids from the parent user namespace to the
-	 * kernel global id space.
+	/* Update forward array by mapping the lower ids from the parent user
+	 * namespace to the kernel global id space.
 	 */
 	for (idx = 0; idx < new_map.nr_extents; idx++) {
 		struct uid_gid_extent *e;
@@ -1004,6 +1004,11 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 		e->lower_first = lower_first;
 	}
 
+	/* Duplicate forward array into reverse array and sort the latter. */
+	ret = dup_and_sort_idmaps_reverse(&new_map);
+	if (ret < 0)
+		goto out;
+
 	/* Install the map */
 	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
 		memcpy(map->extent, new_map.extent,
-- 
2.19.1
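
Not part of the patch: for reviewers who want to exercise the affected
path, below is a minimal userspace setup sketch. It is not Jann's
original reproducer; it only creates the precondition described in the
commit message, namely a uid_map/gid_map with more than
UID_GID_MAP_MAX_BASE_EXTENTS (five) extents written into a freshly
created user namespace. It assumes the caller holds CAP_SETUID and
CAP_SETGID over the parent namespace and, per the commit message,
already runs inside a non-identity-mapped user namespace (e.g. a
container) so that the lower ids actually need translation. All id
ranges are arbitrary examples.

/*
 * Illustrative setup only: creates a child user namespace whose
 * uid_map/gid_map has six extents, which makes map_write() take the
 * sorted forward/reverse array path that this patch fixes.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static void write_file(const char *path, const char *buf)
{
	int fd = open(path, O_WRONLY);

	/* id map files must be written with a single write(). */
	if (fd < 0 || write(fd, buf, strlen(buf)) != (ssize_t)strlen(buf)) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	close(fd);
}

int main(void)
{
	char path[64];
	/* Six extents: more than UID_GID_MAP_MAX_BASE_EXTENTS. */
	const char *map = "0 100000 1000\n"
			  "1000 200000 1000\n"
			  "2000 300000 1000\n"
			  "3000 400000 1000\n"
			  "4000 500000 1000\n"
			  "5000 600000 1000\n";
	pid_t pid = fork();

	if (pid < 0) {
		perror("fork");
		exit(EXIT_FAILURE);
	}
	if (pid == 0) {
		/* Child: create the nested user namespace, then idle. */
		if (unshare(CLONE_NEWUSER) < 0) {
			perror("unshare");
			exit(EXIT_FAILURE);
		}
		pause();
		exit(EXIT_SUCCESS);
	}

	sleep(1); /* crude synchronization: wait for the child to unshare */

	snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)pid);
	write_file(path, map);
	snprintf(path, sizeof(path), "/proc/%d/gid_map", (int)pid);
	write_file(path, map);

	/*
	 * From here, a task entering the child namespace exercises the
	 * reverse mapping through helpers such as inode_owner_or_capable();
	 * with the bug present, those lookups see untranslated lower ids.
	 */
	kill(pid, SIGTERM);
	waitpid(pid, NULL, 0);
	return 0;
}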