+ if (NR_ACTIVE_FILE == lru) {
+ long long kib_active_file_now=global_node_page_state(NR_ACTIVE_FILE) * MAX_NR_ZONES;
+ if (kib_active_file_now <= 256*1024) {
+ nr[lru] = 0; //don't reclaim any Active(file) (see /proc/meminfo) if they are under 256MiB
+ continue;
+ }
+ }
*lru_pages += size;
nr[lru] = scan;
}
```
Just an idea, try reproducing with kernel patch `le9g.patch`:
```
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dbdc46a84f63..7a0b7e32ff45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2445,6 +2445,13 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
BUG();
}
+ if (NR_ACTIVE_FILE == lru) {
+ long long kib_active_file_now=global_node_page_state(NR_ACTIVE_FILE) * MAX_NR_ZONES;
+ if (kib_active_file_now <= 256*1024) {
+ nr[lru] = 0; //don't reclaim any Active(file) (see /proc/meminfo) if they are under 256MiB
+ continue;
+ }
+ }
*lru_pages += size;
nr[lru] = scan;
}
```
see: https://gist.github.com/constantoverride/84eba764f487049ed642eb2111a20830#gistcomment-2997481
(^ scroll a bit up for some details of what the patch does)