diff -Nru crash-8.0.2/debian/changelog crash-8.0.2/debian/changelog
--- crash-8.0.2/debian/changelog	2023-07-06 14:49:16.000000000 +0000
+++ crash-8.0.2/debian/changelog	2024-01-04 06:47:25.000000000 +0000
@@ -1,3 +1,34 @@
+crash (8.0.2-1ubuntu1.1) mantic; urgency=medium
+
+  * Fix dump file parsing issues arising from structural changes in Linux
+    kernel 6.5 (LP: #2038249)
+    - d/p/lp2038249-0001-arm64-handle-vabits_actual-symbol-missing-case.patch
+    - d/p/lp2038249-0002-x86_64-Fix-for-move-of-per-cpu-variables-into-struct.patch
+    - d/p/lp2038249-0003-Fix-for-mm_struct.rss_stat-conversion-into-percpu_co.patch
+    - d/p/lp2038249-0004-SLUB-Fix-for-offset-change-of-struct-slab-members-on.patch
+    - d/p/lp2038249-0005-Fix-for-kmem-i-to-display-correct-SLAB-statistics-on.patch
+    - d/p/lp2038249-0006-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.1-and-later.patch
+    - d/p/lp2038249-0007-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.2-rc1-and-l.patch
+    - d/p/lp2038249-0008-Port-the-maple-tree-data-structures-and-functions.patch
+    - d/p/lp2038249-0009-Add-do_maple_tree-for-maple-tree-operations.patch
+    - d/p/lp2038249-0010-Introduce-maple-tree-vma-iteration-to-vm_area_dump.patch
+    - d/p/lp2038249-0011-Dump-maple-tree-offset-variables-by-help-o.patch
+    - d/p/lp2038249-0012-Fix-kmem-n-option-to-display-memory-blocks-on-Linux-.patch
+    - d/p/lp2038249-0013-Fix-failure-of-dev-d-D-options-on-Linux-6.4-and-late.patch
+    - d/p/lp2038249-0014-Fix-kmem-v-option-displaying-no-regions-on-Linux-6.3.patch
+    - d/p/lp2038249-0015-x86_64-Fix-bt-command-printing-stale-entries-on-Linu.patch
+    - d/p/lp2038249-0016-Support-module-memory-layout-change-on-Linux-6.4.patch
+    - d/p/lp2038249-0017-Fix-failure-of-gathering-task-table-on-Linux-6.5-rc1.patch
+    - d/p/lp2038249-0018-Fix-compilation-error-due-to-new-strlcpy-function-th.patch
+    - d/p/lp2038249-0019-Fix-irq-a-option-on-Linux-6.0-and-later.patch
+    - d/p/lp2038249-0020-Exclude-zero-entries-from-do_maple_tree-return-value.patch
+    - d/p/lp2038249-0021-Fix-irq-a-s-options-on-Linux-6.5-rc1-and-later.patch
+    - d/p/lp2038249-0022-arm64-Fix-vtop-command-to-display-swap-information-o.patch
+    - d/p/lp2038249-0023-Fix-rd-command-to-display-data-on-zram-on-Linux-5.17.patch
+    - d/p/lp2038249-0024-Fix-compilation-error-and-warning-with-gcc-4.8.5.patch
+
+ -- Chengen Du  Thu, 04 Jan 2024 06:47:25 +0000
+
 crash (8.0.2-1ubuntu1) mantic; urgency=medium
 
   * Merge with Debian; remaining changes:
diff -Nru crash-8.0.2/debian/patches/lp2038249-0001-arm64-handle-vabits_actual-symbol-missing-case.patch crash-8.0.2/debian/patches/lp2038249-0001-arm64-handle-vabits_actual-symbol-missing-case.patch
--- crash-8.0.2/debian/patches/lp2038249-0001-arm64-handle-vabits_actual-symbol-missing-case.patch	1970-01-01 00:00:00.000000000 +0000
+++ crash-8.0.2/debian/patches/lp2038249-0001-arm64-handle-vabits_actual-symbol-missing-case.patch	2024-01-04 06:47:25.000000000 +0000
@@ -0,0 +1,30 @@
+From: Pavankumar Kondeti
+Date: Thu Dec 8 09:55:07 2022 +0530
+Subject: arm64: handle vabits_actual symbol missing case
+
+After kernel commit 0d9b1ffefabe ("arm64: mm: make vabits_actual
+a build time constant if possible"), introduced in Linux v5.19,
+crash cannot find the vabits_actual symbol if VA_BITS <= 48.
+Add a fallback option to initialize VA_BITS based on the
+user-supplied machdep option.
+
+Tested by loading ramdumps from both 6.0 and 5.15 kernels.
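+
+For illustration, with this fallback in place a user can supply the
+value by hand through crash's arm64 machdep option (a hedged example,
+assuming the "vabits_actual" option name accepted by arm64.c; the
+vmlinux/vmcore paths are placeholders):
+
+  $ crash --machdep vabits_actual=48 vmlinux vmcore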
+ +Signed-off-by: Pavankumar Kondeti + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/141e75f3c11cc9342f11418e0bec86877424bef8 + +--- crash-8.0.2.orig/arm64.c ++++ crash-8.0.2/arm64.c +@@ -4671,6 +4671,10 @@ arm64_calc_VA_BITS(void) + return; + } else if (arm64_set_va_bits_by_tcr()) { + return; ++ } else if (machdep->machspec->VA_BITS_ACTUAL) { ++ machdep->machspec->VA_BITS = machdep->machspec->VA_BITS_ACTUAL; ++ machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); ++ return; + } + + if (!(sp = symbol_search("swapper_pg_dir")) && diff -Nru crash-8.0.2/debian/patches/lp2038249-0002-x86_64-Fix-for-move-of-per-cpu-variables-into-struct.patch crash-8.0.2/debian/patches/lp2038249-0002-x86_64-Fix-for-move-of-per-cpu-variables-into-struct.patch --- crash-8.0.2/debian/patches/lp2038249-0002-x86_64-Fix-for-move-of-per-cpu-variables-into-struct.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0002-x86_64-Fix-for-move-of-per-cpu-variables-into-struct.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,136 @@ +From: Kazuhito Hagio +Date: Wed Dec 7 09:46:56 2022 +0900 +Subject: x86_64: Fix for move of per-cpu variables into struct pcpu_hot + +The following kernel commits, which are contained in Linux 6.2-rc1 and +later kernels, introduced struct pcpu_hot and moved several per-cpu +variables into it. + + d7b6d709a76a x86/percpu: Move irq_stack variables next to current_task + 7443b296e699 x86/percpu: Move cpu_number next to current_task + e57ef2ed97c1 x86: Put hot per CPU variables into a struct + +Without the patch, crash fails to start session with the following +error: + + $ crash vmlinux vmcore + ... + bt: invalid size request: 0 type: "stack contents" + bt: read of stack at 0 failed + + +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/df1f0cba729fa0e0d8a63220769c42cc9033acc1 + +--- crash-8.0.2.orig/x86_64.c ++++ crash-8.0.2/x86_64.c +@@ -1290,12 +1290,15 @@ x86_64_per_cpu_init(void) + { + int i, cpus, cpunumber; + struct machine_specific *ms; +- struct syment *irq_sp, *curr_sp, *cpu_sp, *hardirq_stack_ptr_sp; ++ struct syment *irq_sp, *curr_sp, *cpu_sp, *hardirq_stack_ptr_sp, *pcpu_sp; + ulong hardirq_stack_ptr; + ulong __per_cpu_load = 0; ++ long hardirq_addr = 0, cpu_addr = 0, curr_addr = 0; + + ms = machdep->machspec; + ++ pcpu_sp = per_cpu_symbol_search("pcpu_hot"); ++ + hardirq_stack_ptr_sp = per_cpu_symbol_search("hardirq_stack_ptr"); + irq_sp = per_cpu_symbol_search("per_cpu__irq_stack_union"); + cpu_sp = per_cpu_symbol_search("per_cpu__cpu_number"); +@@ -1324,7 +1327,7 @@ x86_64_per_cpu_init(void) + return; + } + +- if (!cpu_sp || (!irq_sp && !hardirq_stack_ptr_sp)) ++ if (!pcpu_sp && (!cpu_sp || (!irq_sp && !hardirq_stack_ptr_sp))) + return; + + if (MEMBER_EXISTS("irq_stack_union", "irq_stack")) +@@ -1337,10 +1340,21 @@ x86_64_per_cpu_init(void) + if (kernel_symbol_exists("__per_cpu_load")) + __per_cpu_load = symbol_value("__per_cpu_load"); + ++ if (pcpu_sp) { ++ hardirq_addr = pcpu_sp->value + MEMBER_OFFSET("pcpu_hot", "hardirq_stack_ptr"); ++ cpu_addr = pcpu_sp->value + MEMBER_OFFSET("pcpu_hot", "cpu_number"); ++ curr_addr = pcpu_sp->value + MEMBER_OFFSET("pcpu_hot", "current_task"); ++ } else { ++ if (hardirq_stack_ptr_sp) ++ hardirq_addr = hardirq_stack_ptr_sp->value; ++ cpu_addr = cpu_sp->value; ++ curr_addr = curr_sp->value; ++ } ++ + for (i = cpus = 0; i < NR_CPUS; 
i++) { + if (__per_cpu_load && kt->__per_cpu_offset[i] == __per_cpu_load) + break; +- if (!readmem(cpu_sp->value + kt->__per_cpu_offset[i], ++ if (!readmem(cpu_addr + kt->__per_cpu_offset[i], + KVADDR, &cpunumber, sizeof(int), + "cpu number (per_cpu)", QUIET|RETURN_ON_ERROR)) + break; +@@ -1349,8 +1363,8 @@ x86_64_per_cpu_init(void) + break; + cpus++; + +- if (hardirq_stack_ptr_sp) { +- if (!readmem(hardirq_stack_ptr_sp->value + kt->__per_cpu_offset[i], ++ if (pcpu_sp || hardirq_stack_ptr_sp) { ++ if (!readmem(hardirq_addr + kt->__per_cpu_offset[i], + KVADDR, &hardirq_stack_ptr, sizeof(void *), + "hardirq_stack_ptr (per_cpu)", QUIET|RETURN_ON_ERROR)) + continue; +@@ -1373,13 +1387,13 @@ x86_64_per_cpu_init(void) + else + kt->cpus = cpus; + +- if (DUMPFILE() && curr_sp) { ++ if (DUMPFILE() && (pcpu_sp || curr_sp)) { + if ((ms->current = calloc(kt->cpus, sizeof(ulong))) == NULL) + error(FATAL, + "cannot calloc %d x86_64 current pointers!\n", + kt->cpus); + for (i = 0; i < kt->cpus; i++) +- if (!readmem(curr_sp->value + kt->__per_cpu_offset[i], ++ if (!readmem(curr_addr + kt->__per_cpu_offset[i], + KVADDR, &ms->current[i], sizeof(ulong), + "current_task (per_cpu)", RETURN_ON_ERROR)) + continue; +@@ -5625,11 +5639,19 @@ x86_64_get_smp_cpus(void) + char *cpu_pda_buf; + ulong level4_pgt, cpu_pda_addr; + struct syment *sp; +- ulong __per_cpu_load = 0; ++ ulong __per_cpu_load = 0, cpu_addr; + + if (!VALID_STRUCT(x8664_pda)) { +- if (!(sp = per_cpu_symbol_search("per_cpu__cpu_number")) || +- !(kt->flags & PER_CPU_OFF)) ++ ++ if (!(kt->flags & PER_CPU_OFF)) ++ return 1; ++ ++ if ((sp = per_cpu_symbol_search("pcpu_hot")) && ++ (cpu_addr = MEMBER_OFFSET("pcpu_hot", "cpu_number")) != INVALID_OFFSET) ++ cpu_addr += sp->value; ++ else if ((sp = per_cpu_symbol_search("per_cpu__cpu_number"))) ++ cpu_addr = sp->value; ++ else + return 1; + + if (kernel_symbol_exists("__per_cpu_load")) +@@ -5638,7 +5660,7 @@ x86_64_get_smp_cpus(void) + for (i = cpus = 0; i < NR_CPUS; i++) { + if (__per_cpu_load && kt->__per_cpu_offset[i] == __per_cpu_load) + break; +- if (!readmem(sp->value + kt->__per_cpu_offset[i], ++ if (!readmem(cpu_addr + kt->__per_cpu_offset[i], + KVADDR, &cpunumber, sizeof(int), + "cpu number (per_cpu)", QUIET|RETURN_ON_ERROR)) + break; diff -Nru crash-8.0.2/debian/patches/lp2038249-0003-Fix-for-mm_struct.rss_stat-conversion-into-percpu_co.patch crash-8.0.2/debian/patches/lp2038249-0003-Fix-for-mm_struct.rss_stat-conversion-into-percpu_co.patch --- crash-8.0.2/debian/patches/lp2038249-0003-Fix-for-mm_struct.rss_stat-conversion-into-percpu_co.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0003-Fix-for-mm_struct.rss_stat-conversion-into-percpu_co.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,143 @@ +From: Kazuhito Hagio +Date: Thu Dec 15 11:31:38 2022 +0900 +Subject: Fix for mm_struct.rss_stat conversion into percpu_counter + +Kernel commit f1a7941243c1 ("mm: convert mm's rss stats into +percpu_counter"), which is contained in Linux 6.2-rc1 and later +kernels, changed mm_struct.rss_stat from struct mm_rss_stat into an +array of struct percpu_counter. 
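+
+For reference, a sketch of the layout change being adapted to,
+abbreviated from the kernel's include/linux/mm_types.h (the index
+arithmetic mirrors the memory.c hunk below, where tt->filepages and
+tt->anonpages hold the MM_FILEPAGES/MM_ANONPAGES indices):
+
+  /* before 6.2: plain atomic counters embedded in mm_struct */
+  struct mm_rss_stat {
+          atomic_long_t count[NR_MM_COUNTERS];
+  };
+
+  /* 6.2 and later: one struct percpu_counter per counter type */
+  struct percpu_counter rss_stat[NR_MM_COUNTERS];
+
+  /* so crash must step by SIZE(percpu_counter), e.g. for file pages: */
+  fbc = tc->mm_struct + OFFSET(mm_struct_rss_stat) +
+        (tt->filepages * SIZE(percpu_counter));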
+ +Without the patch, "ps" and several commands fail with the following +error message: + + ps: invalid structure member offset: mm_rss_stat_count + FILE: memory.c LINE: 4724 FUNCTION: get_task_mem_usage() + +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/f182d08bab202dddf20b742fef6cc2bda0a56d6c + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -2181,6 +2181,7 @@ struct offset_table { + long blk_mq_tags_nr_reserved_tags; + long blk_mq_tags_rqs; + long request_queue_hctx_table; ++ long percpu_counter_counters; + }; + + struct size_table { /* stash of commonly-used sizes */ +@@ -2351,6 +2352,7 @@ struct size_table { /* stash of + long sbitmap_queue; + long sbq_wait_state; + long blk_mq_tags; ++ long percpu_counter; + }; + + struct array_table { +@@ -5305,6 +5307,7 @@ struct rb_node *rb_right(struct rb_node + struct rb_node *rb_left(struct rb_node *, struct rb_node *); + struct rb_node *rb_next(struct rb_node *); + struct rb_node *rb_last(struct rb_root *); ++long percpu_counter_sum_positive(ulong fbc); + + /* + * symbols.c +--- crash-8.0.2.orig/kernel.c ++++ crash-8.0.2/kernel.c +@@ -316,6 +316,8 @@ kernel_init() + } + + MEMBER_OFFSET_INIT(percpu_counter_count, "percpu_counter", "count"); ++ MEMBER_OFFSET_INIT(percpu_counter_counters, "percpu_counter", "counters"); ++ STRUCT_SIZE_INIT(percpu_counter, "percpu_counter"); + + if (STRUCT_EXISTS("runqueue")) { + rqstruct = "runqueue"; +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -4713,7 +4713,7 @@ get_task_mem_usage(ulong task, struct ta + /* + * Latest kernels have mm_struct.mm_rss_stat[]. + */ +- if (VALID_MEMBER(mm_struct_rss_stat)) { ++ if (VALID_MEMBER(mm_struct_rss_stat) && VALID_MEMBER(mm_rss_stat_count)) { + long anonpages, filepages, count; + + anonpages = tt->anonpages; +@@ -4737,6 +4737,18 @@ get_task_mem_usage(ulong task, struct ta + (anonpages * sizeof(long))); + if (count > 0) + rss += count; ++ ++ } else if (VALID_MEMBER(mm_struct_rss_stat)) { ++ /* 6.2: struct percpu_counter rss_stat[NR_MM_COUNTERS] */ ++ ulong fbc; ++ ++ fbc = tc->mm_struct + OFFSET(mm_struct_rss_stat) + ++ (tt->filepages * SIZE(percpu_counter)); ++ rss += percpu_counter_sum_positive(fbc); ++ ++ fbc = tc->mm_struct + OFFSET(mm_struct_rss_stat) + ++ (tt->anonpages * SIZE(percpu_counter)); ++ rss += percpu_counter_sum_positive(fbc); + } + + /* Check whether SPLIT_RSS_COUNTING is enabled */ +--- crash-8.0.2.orig/symbols.c ++++ crash-8.0.2/symbols.c +@@ -10633,8 +10633,8 @@ dump_offset_table(char *spec, ulong make + OFFSET(ktime_t_nsec)); + fprintf(fp, " atomic_t_counter: %ld\n", + OFFSET(atomic_t_counter)); +- fprintf(fp, " percpu_counter_count: %ld\n", +- OFFSET(percpu_counter_count)); ++ fprintf(fp, " percpu_counter_count: %ld\n", OFFSET(percpu_counter_count)); ++ fprintf(fp, " percpu_counter_counters: %ld\n", OFFSET(percpu_counter_counters)); + fprintf(fp, " sk_buff_head_next: %ld\n", + OFFSET(sk_buff_head_next)); + fprintf(fp, " sk_buff_head_qlen: %ld\n", +@@ -11028,6 +11028,8 @@ dump_offset_table(char *spec, ulong make + fprintf(fp, " sbq_wait_state: %ld\n", SIZE(sbq_wait_state)); + fprintf(fp, " blk_mq_tags: %ld\n", SIZE(blk_mq_tags)); + ++ fprintf(fp, " percpu_counter: %ld\n", SIZE(percpu_counter)); ++ + fprintf(fp, "\n array_table:\n"); + /* + * Use get_array_length() for those fields not set up at init-time; +--- crash-8.0.2.orig/tools.c ++++ crash-8.0.2/tools.c +@@ -6902,3 +6902,31 @@ rb_last(struct rb_root *root) + + return node; + } 
++ ++long ++percpu_counter_sum_positive(ulong fbc) ++{ ++ int i, count; ++ ulong addr; ++ long ret; ++ ++ if (INVALID_MEMBER(percpu_counter_count)) ++ return 0; ++ ++ readmem(fbc + OFFSET(percpu_counter_count), KVADDR, &ret, ++ sizeof(long long), "percpu_counter.count", FAULT_ON_ERROR); ++ ++ if (INVALID_MEMBER(percpu_counter_counters)) /* !CONFIG_SMP */ ++ return (ret < 0) ? 0 : ret; ++ ++ readmem(fbc + OFFSET(percpu_counter_counters), KVADDR, &addr, ++ sizeof(void *), "percpu_counter.counters", FAULT_ON_ERROR); ++ ++ for (i = 0; i < kt->cpus; i++) { ++ readmem(addr + kt->__per_cpu_offset[i], KVADDR, &count, ++ sizeof(int), "percpu_counter.counters count", FAULT_ON_ERROR); ++ ret += count; ++ } ++ ++ return (ret < 0) ? 0 : ret; ++} diff -Nru crash-8.0.2/debian/patches/lp2038249-0004-SLUB-Fix-for-offset-change-of-struct-slab-members-on.patch crash-8.0.2/debian/patches/lp2038249-0004-SLUB-Fix-for-offset-change-of-struct-slab-members-on.patch --- crash-8.0.2/debian/patches/lp2038249-0004-SLUB-Fix-for-offset-change-of-struct-slab-members-on.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0004-SLUB-Fix-for-offset-change-of-struct-slab-members-on.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,135 @@ +From: Kazuhito Hagio +Date: Fri Dec 16 14:03:46 2022 +0900 +Subject: SLUB: Fix for offset change of struct slab members on Linux 6.2-rc1 + +The following kernel commits split slab info from struct page into +struct slab in Linux 5.17. + + d122019bf061 ("mm: Split slab into its own type") + 07f910f9b729 ("mm: Remove slab from struct page") + +Crash commit 5f390ed811b0 followed the change for SLUB, but crash still +uses the offset of page.lru inappropriately. Luckily, it could work +because it was the same value as the offset of slab.slab_list until +Linux 6.1. + +However, kernel commit 130d4df57390 ("mm/sl[au]b: rearrange struct slab +fields to allow larger rcu_head") in Linux 6.2-rc1 changed the offset of +slab.slab_list. As a result, without the patch, "kmem -s|-S" options +print the following errors and fail to print values correctly for +kernels configured with CONFIG_SLUB. + + crash> kmem -S filp + CACHE OBJSIZE ALLOCATED TOTAL SLABS SSIZE NAME + kmem: filp: partial list slab: ffffcc650405ab88 invalid page.inuse: -1 + ffff8fa0401eca00 232 1267 1792 56 8k filp + ... 
+ KMEM_CACHE_NODE NODE SLABS PARTIAL PER-CPU + ffff8fa0401cb8c0 0 56 24 8 + NODE 0 PARTIAL: + SLAB MEMORY NODE TOTAL ALLOCATED FREE + kmem: filp: invalid partial list slab pointer: ffffcc650405ab88 + +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/d83df2fb66cd77877d365fda32cd45c531796599 + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -2182,6 +2182,7 @@ struct offset_table { + long blk_mq_tags_rqs; + long request_queue_hctx_table; + long percpu_counter_counters; ++ long slab_slab_list; + }; + + struct size_table { /* stash of commonly-used sizes */ +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -781,6 +781,8 @@ vm_init(void) + if (INVALID_MEMBER(page_slab)) + MEMBER_OFFSET_INIT(page_slab, "slab", "slab_cache"); + ++ MEMBER_OFFSET_INIT(slab_slab_list, "slab", "slab_list"); ++ + MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); + if (INVALID_MEMBER(page_slab_page)) + ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); +@@ -19474,6 +19476,7 @@ do_node_lists_slub(struct meminfo *si, u + { + ulong next, last, list_head, flags; + int first; ++ long list_off = VALID_MEMBER(slab_slab_list) ? OFFSET(slab_slab_list) : OFFSET(page_lru); + + if (!node_ptr) + return; +@@ -19487,7 +19490,7 @@ do_node_lists_slub(struct meminfo *si, u + next == list_head ? " (empty)\n" : ""); + first = 0; + while (next != list_head) { +- si->slab = last = next - OFFSET(page_lru); ++ si->slab = last = next - list_off; + if (first++ == 0) + fprintf(fp, " %s", slab_hdr); + +@@ -19510,7 +19513,7 @@ do_node_lists_slub(struct meminfo *si, u + + if (!IS_KVADDR(next) || + ((next != list_head) && +- !is_page_ptr(next - OFFSET(page_lru), NULL))) { ++ !is_page_ptr(next - list_off, NULL))) { + error(INFO, + "%s: partial list slab: %lx invalid page.lru.next: %lx\n", + si->curname, last, next); +@@ -19537,7 +19540,7 @@ do_node_lists_slub(struct meminfo *si, u + next == list_head ? " (empty)\n" : ""); + first = 0; + while (next != list_head) { +- si->slab = next - OFFSET(page_lru); ++ si->slab = next - list_off; + if (first++ == 0) + fprintf(fp, " %s", slab_hdr); + +@@ -19754,6 +19757,7 @@ count_partial(ulong node, struct meminfo + short inuse, objects; + ulong total_inuse; + ulong count = 0; ++ long list_off = VALID_MEMBER(slab_slab_list) ? 
OFFSET(slab_slab_list) : OFFSET(page_lru); + + count = 0; + total_inuse = 0; +@@ -19765,12 +19769,12 @@ count_partial(ulong node, struct meminfo + hq_open(); + + while (next != list_head) { +- if (!readmem(next - OFFSET(page_lru) + OFFSET(page_inuse), ++ if (!readmem(next - list_off + OFFSET(page_inuse), + KVADDR, &inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) { + hq_close(); + return -1; + } +- last = next - OFFSET(page_lru); ++ last = next - list_off; + + if (inuse == -1) { + error(INFO, +@@ -19796,7 +19800,7 @@ count_partial(ulong node, struct meminfo + } + if (!IS_KVADDR(next) || + ((next != list_head) && +- !is_page_ptr(next - OFFSET(page_lru), NULL))) { ++ !is_page_ptr(next - list_off, NULL))) { + error(INFO, "%s: partial list slab: %lx invalid page.lru.next: %lx\n", + si->curname, last, next); + break; +--- crash-8.0.2.orig/symbols.c ++++ crash-8.0.2/symbols.c +@@ -9700,6 +9700,7 @@ dump_offset_table(char *spec, ulong make + OFFSET(slab_inuse)); + fprintf(fp, " slab_free: %ld\n", + OFFSET(slab_free)); ++ fprintf(fp, " slab_slab_list: %ld\n", OFFSET(slab_slab_list)); + + fprintf(fp, " kmem_cache_size: %ld\n", + OFFSET(kmem_cache_size)); diff -Nru crash-8.0.2/debian/patches/lp2038249-0005-Fix-for-kmem-i-to-display-correct-SLAB-statistics-on.patch crash-8.0.2/debian/patches/lp2038249-0005-Fix-for-kmem-i-to-display-correct-SLAB-statistics-on.patch --- crash-8.0.2/debian/patches/lp2038249-0005-Fix-for-kmem-i-to-display-correct-SLAB-statistics-on.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0005-Fix-for-kmem-i-to-display-correct-SLAB-statistics-on.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,42 @@ +From: Lianbo Jiang +Date: Fri Dec 23 18:42:35 2022 +0800 +Subject: Fix for "kmem -i" to display correct SLAB statistics on Linux 5.9 and later + +Kernel commit d42f3245c7e2 ("mm: memcg: convert vmstat slab counters to +bytes"), which is contained in Linux v5.9-rc1 and later kernels, renamed +NR_SLAB_{RECLAIMABLE,UNRECLAIMABLE} to NR_SLAB_{RECLAIMABLE,UNRECLAIMABLE}_B. 
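+
+The rename itself, abbreviated from the kernel's enum node_stat_item
+in include/linux/mmzone.h:
+
+  -     NR_SLAB_RECLAIMABLE,
+  -     NR_SLAB_UNRECLAIMABLE,
+  +     NR_SLAB_RECLAIMABLE_B,
+  +     NR_SLAB_UNRECLAIMABLE_B,
+
+so the fix below tries the old names first and falls back to the "_B"
+names on 5.9 and later kernels.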
+ +Without the patch, "kmem -i" command will display incorrect SLAB +statistics: + + crash> kmem -i | grep -e PAGES -e SLAB + PAGES TOTAL PERCENTAGE + SLAB 89458 349.4 MB 0% of TOTAL MEM + ^^^^^ ^^^^^ + +With the patch, the actual result is: + crash> kmem -i | grep -e PAGES -e SLAB + PAGES TOTAL PERCENTAGE + SLAB 261953 1023.3 MB 0% of TOTAL MEM + +Reported-by: Buland Kumar Singh +Signed-off-by: Lianbo Jiang +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/41d4b85ea50efc733df65ec8421a74be10e47987 + +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -8388,6 +8388,11 @@ dump_kmeminfo(void) + get_slabs = nr_slab; + if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE", &nr_slab, 0)) + get_slabs += nr_slab; ++ } else if (dump_vm_stat("NR_SLAB_RECLAIMABLE_B", &nr_slab, 0)) { ++ /* 5.9 and later */ ++ get_slabs = nr_slab; ++ if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE_B", &nr_slab, 0)) ++ get_slabs += nr_slab; + } + } + diff -Nru crash-8.0.2/debian/patches/lp2038249-0006-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.1-and-later.patch crash-8.0.2/debian/patches/lp2038249-0006-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.1-and-later.patch --- crash-8.0.2/debian/patches/lp2038249-0006-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.1-and-later.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0006-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.1-and-later.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,38 @@ +From: Kazuhito Hagio +Date: Thu Jan 5 17:18:51 2023 +0900 +Subject: SLAB: Fix for "kmem -s|-S" options on Linux 6.1 and later + +Kernel commit e36ce448a08d ("mm/slab: use kmalloc_node() for off slab +freelist_idx_t array allocation"), which is contained in Linux 6.1 and +later kernels, removed kmem_cache.freelist_cache member on kernels +configured with CONFIG_SLAB=y. + +Without the patch, crash does not set SLAB_OVERLOAD_PAGE and +"kmem -s|-S" options fail with the following error: + + kmem: invalid structure member offset: slab_list + FILE: memory.c LINE: 12156 FUNCTION: verify_slab_v2() + +Use kmem_cache.freelist_size instead, which was introduced together +with kmem_cache.freelist_cache by kernel commit 8456a648cf44. + +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/120d6e89fc14eb7f1c9a3106305c7066730f36b8 + +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -535,8 +535,11 @@ vm_init(void) + /* + * slab: overload struct slab over struct page + * https://lkml.org/lkml/2013/10/16/155 ++ * ++ * commit e36ce448a08d removed kmem_cache.freelist_cache in 6.1, ++ * so use freelist_size instead. 
+ */ +- if (MEMBER_EXISTS("kmem_cache", "freelist_cache")) { ++ if (MEMBER_EXISTS("kmem_cache", "freelist_size")) { + vt->flags |= SLAB_OVERLOAD_PAGE; + ANON_MEMBER_OFFSET_INIT(page_s_mem, "page", "s_mem"); + ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); diff -Nru crash-8.0.2/debian/patches/lp2038249-0007-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.2-rc1-and-l.patch crash-8.0.2/debian/patches/lp2038249-0007-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.2-rc1-and-l.patch --- crash-8.0.2/debian/patches/lp2038249-0007-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.2-rc1-and-l.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0007-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.2-rc1-and-l.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,225 @@ +From: Kazuhito Hagio +Date: Thu Jan 5 17:36:42 2023 +0900 +Subject: SLAB: Fix for "kmem -s|-S" options on Linux 6.2-rc1 and later + +Kernel commit 130d4df57390 ("mm/sl[au]b: rearrange struct slab fields to +allow larger rcu_head"), which is contained in Linux 6.2-rc1 and later +kernels, changed the offset of slab.slab_list and now it's not equal to +the offset of page.lru. + +Without the patch, "kmem -s|-S" options print errors and zeros for slab +counters like this for kernels configured with CONFIG_SLAB=y. + + crash> kmem -s + CACHE OBJSIZE ALLOCATED TOTAL SLABS SSIZE NAME + kmem: rpc_inode_cache: partial list: page/slab: fffff31ac4125190 bad active counter: 99476865 + kmem: rpc_inode_cache: partial list: page/slab: fffff31ac4125190 bad s_mem pointer: 100000003 + kmem: rpc_inode_cache: full list: page/slab: fffff31ac4125150 bad active counter: 99476225 + kmem: rpc_inode_cache: full list: page/slab: fffff31ac4125150 bad active counter: 99476225 + kmem: rpc_inode_cache: full list: page/slab: fffff31ac4125150 bad s_mem pointer: 100000005 + ffff930202adfb40 704 0 0 0 4k rpc_inode_cache + ... + +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/ac96e17d1de51016ee1a983e68c7e840ff55ab8d + +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -78,6 +78,7 @@ struct meminfo { /* general pu + int *freelist; + int freelist_index_size; + ulong random; ++ ulong list_offset; + }; + + /* +@@ -553,6 +554,8 @@ vm_init(void) + MEMBER_OFFSET_INIT(page_freelist, "slab", "freelist"); + if (INVALID_MEMBER(page_active)) + MEMBER_OFFSET_INIT(page_active, "slab", "active"); ++ ++ MEMBER_OFFSET_INIT(slab_slab_list, "slab", "slab_list"); + } + + if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) { +@@ -10767,6 +10770,8 @@ dump_kmem_cache_percpu_v2(struct meminfo + if (vt->flags & SLAB_OVERLOAD_PAGE) { + si->freelist = si->kmem_bufctl; + si->freelist_index_size = slab_freelist_index_size(); ++ si->list_offset = VALID_MEMBER(slab_slab_list) ? ++ OFFSET(slab_slab_list) : OFFSET(page_lru); + } + for (i = 0; i < vt->kmem_max_cpus; i++) + si->cpudata[i] = (ulong *) +@@ -11983,7 +11988,7 @@ do_slab_chain_slab_overload_page(long cm + } + last = si->slab; + +- readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, ++ readmem(si->slab - si->list_offset, KVADDR, page_buf, + SIZE(page), "page (slab) buffer", + FAULT_ON_ERROR); + +@@ -11996,8 +12001,7 @@ do_slab_chain_slab_overload_page(long cm + + si->num_slabs++; + +- si->slab = ULONG(page_buf + +- OFFSET(page_lru)); ++ si->slab = ULONG(page_buf + si->list_offset); + + /* + * Check for slab transition. 
(Tony Dziedzic) +@@ -12024,11 +12028,11 @@ do_slab_chain_slab_overload_page(long cm + case SLAB_WALKTHROUGH: + if (si->flags & SLAB_OVERLOAD_PAGE_PTR) { + specified_slab = si->spec_addr; +- si->slab = si->spec_addr + OFFSET(page_lru); ++ si->slab = si->spec_addr + si->list_offset; + } else { + specified_slab = si->slab; + if (si->slab) +- si->slab += OFFSET(page_lru); ++ si->slab += si->list_offset; + } + si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE); + si->flags &= ~SLAB_GET_COUNTS; +@@ -12082,7 +12086,7 @@ do_slab_chain_slab_overload_page(long cm + if (si->slab == slab_chains[s]) + continue; + +- readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, ++ readmem(si->slab - si->list_offset, KVADDR, page_buf, + SIZE(page), "page (slab) buffer", + FAULT_ON_ERROR); + +@@ -12242,7 +12246,7 @@ verify_slab_overload_page(struct meminfo + + errcnt = 0; + +- if (!readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, ++ if (!readmem(si->slab - si->list_offset, KVADDR, page_buf, + SIZE(page), "page (slab) buffer", QUIET|RETURN_ON_ERROR)) { + error(INFO, "%s: %s list: bad slab pointer: %lx\n", + si->curname, list, si->slab); +@@ -12250,7 +12254,7 @@ verify_slab_overload_page(struct meminfo + return FALSE; + } + +- list_head = (struct kernel_list_head *)(page_buf + OFFSET(page_lru)); ++ list_head = (struct kernel_list_head *)(page_buf + si->list_offset); + if (!IS_KVADDR((ulong)list_head->next) || + !accessible((ulong)list_head->next)) { + error(INFO, "%s: %s list: page/slab: %lx bad next pointer: %lx\n", +@@ -12569,7 +12573,7 @@ dump_slab_overload_page(struct meminfo * + int tmp; + ulong slab_overload_page, freelist; + +- slab_overload_page = si->slab - OFFSET(page_lru); ++ slab_overload_page = si->slab - si->list_offset; + + readmem(slab_overload_page + OFFSET(page_s_mem), + KVADDR, &si->s_mem, sizeof(ulong), +@@ -12796,12 +12800,12 @@ gather_slab_free_list_slab_overload_page + + if (CRASHDEBUG(1)) + fprintf(fp, "slab page: %lx active: %ld si->c_num: %ld\n", +- si->slab - OFFSET(page_lru), si->s_inuse, si->c_num); ++ si->slab - si->list_offset, si->s_inuse, si->c_num); + + if (si->s_inuse == si->c_num ) + return; + +- slab_overload_page = si->slab - OFFSET(page_lru); ++ slab_overload_page = si->slab - si->list_offset; + readmem(slab_overload_page + OFFSET(page_freelist), + KVADDR, &freelist, sizeof(void *), "page freelist", + FAULT_ON_ERROR); +@@ -13099,7 +13103,7 @@ dump_slab_objects_percpu(struct meminfo + + if ((si->flags & ADDRESS_SPECIFIED) && + (vt->flags & SLAB_OVERLOAD_PAGE)) { +- readmem(si->slab - OFFSET(page_lru) + OFFSET(page_freelist), ++ readmem(si->slab - si->list_offset + OFFSET(page_freelist), + KVADDR, &freelist, sizeof(ulong), "page.freelist", + FAULT_ON_ERROR); + +@@ -18713,6 +18717,9 @@ dump_kmem_cache_slub(struct meminfo *si) + + si->cache_buf = GETBUF(SIZE(kmem_cache)); + ++ si->list_offset = VALID_MEMBER(slab_slab_list) ? ++ OFFSET(slab_slab_list) : OFFSET(page_lru); ++ + if (VALID_MEMBER(page_objects) && + OFFSET(page_objects) == OFFSET(page_inuse)) + si->flags |= SLAB_BITFIELD; +@@ -19484,7 +19491,6 @@ do_node_lists_slub(struct meminfo *si, u + { + ulong next, last, list_head, flags; + int first; +- long list_off = VALID_MEMBER(slab_slab_list) ? OFFSET(slab_slab_list) : OFFSET(page_lru); + + if (!node_ptr) + return; +@@ -19498,7 +19504,7 @@ do_node_lists_slub(struct meminfo *si, u + next == list_head ? 
" (empty)\n" : ""); + first = 0; + while (next != list_head) { +- si->slab = last = next - list_off; ++ si->slab = last = next - si->list_offset; + if (first++ == 0) + fprintf(fp, " %s", slab_hdr); + +@@ -19521,7 +19527,7 @@ do_node_lists_slub(struct meminfo *si, u + + if (!IS_KVADDR(next) || + ((next != list_head) && +- !is_page_ptr(next - list_off, NULL))) { ++ !is_page_ptr(next - si->list_offset, NULL))) { + error(INFO, + "%s: partial list slab: %lx invalid page.lru.next: %lx\n", + si->curname, last, next); +@@ -19548,7 +19554,7 @@ do_node_lists_slub(struct meminfo *si, u + next == list_head ? " (empty)\n" : ""); + first = 0; + while (next != list_head) { +- si->slab = next - list_off; ++ si->slab = next - si->list_offset; + if (first++ == 0) + fprintf(fp, " %s", slab_hdr); + +@@ -19765,7 +19771,6 @@ count_partial(ulong node, struct meminfo + short inuse, objects; + ulong total_inuse; + ulong count = 0; +- long list_off = VALID_MEMBER(slab_slab_list) ? OFFSET(slab_slab_list) : OFFSET(page_lru); + + count = 0; + total_inuse = 0; +@@ -19777,12 +19782,12 @@ count_partial(ulong node, struct meminfo + hq_open(); + + while (next != list_head) { +- if (!readmem(next - list_off + OFFSET(page_inuse), ++ if (!readmem(next - si->list_offset + OFFSET(page_inuse), + KVADDR, &inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) { + hq_close(); + return -1; + } +- last = next - list_off; ++ last = next - si->list_offset; + + if (inuse == -1) { + error(INFO, +@@ -19808,7 +19813,7 @@ count_partial(ulong node, struct meminfo + } + if (!IS_KVADDR(next) || + ((next != list_head) && +- !is_page_ptr(next - list_off, NULL))) { ++ !is_page_ptr(next - si->list_offset, NULL))) { + error(INFO, "%s: partial list slab: %lx invalid page.lru.next: %lx\n", + si->curname, last, next); + break; diff -Nru crash-8.0.2/debian/patches/lp2038249-0008-Port-the-maple-tree-data-structures-and-functions.patch crash-8.0.2/debian/patches/lp2038249-0008-Port-the-maple-tree-data-structures-and-functions.patch --- crash-8.0.2/debian/patches/lp2038249-0008-Port-the-maple-tree-data-structures-and-functions.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0008-Port-the-maple-tree-data-structures-and-functions.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,621 @@ +From: Tao Liu +Date: Tue Jan 10 14:56:27 2023 +0800 +Subject: Port the maple tree data structures and functions + +There have been two ways to iterate vm_area_struct until Linux 6.0: + 1) by rbtree, aka vma.vm_rb; + 2) by linked list, aka vma.vm_{next,prev}. +However with the maple tree patches[1][2] in Linux 6.1, vm_rb and +vm_{next,prev} are removed from vm_area_struct. The vm_area_dump() +in crash mainly uses the linked list for vma iteration, which will +not work for this case. So the maple tree iteration needs to be +ported to crash. + +For crash, currently it only iteratively reads the maple tree, +no more rcu safe or maple tree modification features needed. +So we only port a subset of kernel maple tree features. +In addition, we need to modify the ported kernel source code, +making it compatible with crash. + +This patch deals with the two issues: + 1) Poring mt_dump() function and all its dependencies from + kernel source to crash, to enable crash maple tree iteration, + 2) adapting the ported code with crash. 
+ +[1]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=524e00b36e8c547f5582eef3fb645a8d9fc5e3df +[2]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=763ecb035029f500d7e6dc99acd1ad299b7726a1 + +Signed-off-by: Tao Liu + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/872cad2d63b3a07f65323fe80a7abb29ea276b44 + +Index: crash-8.0.2/defs.h +=================================================================== +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -2183,6 +2183,21 @@ struct offset_table { + long request_queue_hctx_table; + long percpu_counter_counters; + long slab_slab_list; ++ long mm_struct_mm_mt; ++ long maple_tree_ma_root; ++ long maple_tree_ma_flags; ++ long maple_node_parent; ++ long maple_node_ma64; ++ long maple_node_mr64; ++ long maple_node_slot; ++ long maple_arange_64_pivot; ++ long maple_arange_64_slot; ++ long maple_arange_64_gap; ++ long maple_arange_64_meta; ++ long maple_range_64_pivot; ++ long maple_range_64_slot; ++ long maple_metadata_end; ++ long maple_metadata_gap; + }; + + struct size_table { /* stash of commonly-used sizes */ +@@ -2354,6 +2369,8 @@ struct size_table { /* stash of + long sbq_wait_state; + long blk_mq_tags; + long percpu_counter; ++ long maple_tree; ++ long maple_node; + }; + + struct array_table { +@@ -5561,6 +5578,8 @@ int file_dump(ulong, ulong, ulong, int, + int same_file(char *, char *); + int cleanup_memory_driver(void); + ++void maple_init(void); ++int do_mptree(struct tree_data *); + + /* + * help.c +Index: crash-8.0.2/maple_tree.c +=================================================================== +--- /dev/null ++++ crash-8.0.2/maple_tree.c +@@ -0,0 +1,407 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Maple Tree implementation ++ * Copyright (c) 2018-2022 Oracle Corporation ++ * Authors: Liam R. 
Howlett ++ * Matthew Wilcox ++ * ++ * The following are copied and modified from lib/maple_tree.c ++ */ ++ ++#include "maple_tree.h" ++#include "defs.h" ++ ++unsigned char *mt_slots = NULL; ++unsigned char *mt_pivots = NULL; ++ulong mt_max[4] = {0}; ++ ++#define MAPLE_BUFSIZE 512 ++ ++static inline ulong mte_to_node(ulong maple_enode_entry) ++{ ++ return maple_enode_entry & ~MAPLE_NODE_MASK; ++} ++ ++static inline enum maple_type mte_node_type(ulong maple_enode_entry) ++{ ++ return (maple_enode_entry >> MAPLE_NODE_TYPE_SHIFT) & ++ MAPLE_NODE_TYPE_MASK; ++} ++ ++static inline ulong mt_slot(void **slots, unsigned char offset) ++{ ++ return (ulong)slots[offset]; ++} ++ ++static inline bool ma_is_leaf(const enum maple_type type) ++{ ++ return type < maple_range_64; ++} ++ ++/*************** For cmd_tree ********************/ ++ ++struct maple_tree_ops { ++ void (*entry)(ulong node, ulong slot, const char *path, ++ ulong index, void *private); ++ void *private; ++ bool is_td; ++}; ++ ++static const char spaces[] = " "; ++ ++static void do_mt_range64(ulong, ulong, ulong, uint, char *, ulong *, ++ struct maple_tree_ops *); ++static void do_mt_arange64(ulong, ulong, ulong, uint, char *, ulong *, ++ struct maple_tree_ops *); ++static void do_mt_entry(ulong, ulong, ulong, uint, uint, char *, ulong *, ++ struct maple_tree_ops *); ++static void do_mt_node(ulong, ulong, ulong, uint, char *, ulong *, ++ struct maple_tree_ops *); ++struct req_entry *fill_member_offsets(char *); ++void dump_struct_members_fast(struct req_entry *, int, ulong); ++void dump_struct_members_for_tree(struct tree_data *, int, ulong); ++ ++static void mt_dump_range(ulong min, ulong max, uint depth) ++{ ++ if (min == max) ++ fprintf(fp, "%.*s%lu: ", depth * 2, spaces, min); ++ else ++ fprintf(fp, "%.*s%lu-%lu: ", depth * 2, spaces, min, max); ++} ++ ++static inline bool mt_is_reserved(ulong entry) ++{ ++ return (entry < MAPLE_RESERVED_RANGE) && xa_is_internal(entry); ++} ++ ++static inline bool mte_is_leaf(ulong maple_enode_entry) ++{ ++ return ma_is_leaf(mte_node_type(maple_enode_entry)); ++} ++ ++static uint mt_height(char *mt_buf) ++{ ++ return (UINT(mt_buf + OFFSET(maple_tree_ma_flags)) & ++ MT_FLAGS_HEIGHT_MASK) ++ >> MT_FLAGS_HEIGHT_OFFSET; ++} ++ ++static void dump_mt_range64(char *mr64_buf) ++{ ++ int i; ++ ++ fprintf(fp, " contents: "); ++ for (i = 0; i < mt_slots[maple_range_64] - 1; i++) ++ fprintf(fp, "%p %lu ", ++ VOID_PTR(mr64_buf + OFFSET(maple_range_64_slot) ++ + sizeof(void *) * i), ++ ULONG(mr64_buf + OFFSET(maple_range_64_pivot) ++ + sizeof(ulong) * i)); ++ fprintf(fp, "%p\n", VOID_PTR(mr64_buf + OFFSET(maple_range_64_slot) ++ + sizeof(void *) * i)); ++} ++ ++static void dump_mt_arange64(char *ma64_buf) ++{ ++ int i; ++ ++ fprintf(fp, " contents: "); ++ for (i = 0; i < mt_slots[maple_arange_64]; i++) ++ fprintf(fp, "%lu ", ULONG(ma64_buf + OFFSET(maple_arange_64_gap) ++ + sizeof(ulong) * i)); ++ ++ fprintf(fp, "| %02X %02X| ", ++ UCHAR(ma64_buf + OFFSET(maple_arange_64_meta) + ++ OFFSET(maple_metadata_end)), ++ UCHAR(ma64_buf + OFFSET(maple_arange_64_meta) + ++ OFFSET(maple_metadata_gap))); ++ ++ for (i = 0; i < mt_slots[maple_arange_64] - 1; i++) ++ fprintf(fp, "%p %lu ", ++ VOID_PTR(ma64_buf + OFFSET(maple_arange_64_slot) + ++ sizeof(void *) * i), ++ ULONG(ma64_buf + OFFSET(maple_arange_64_pivot) + ++ sizeof(ulong) * i)); ++ fprintf(fp, "%p\n", VOID_PTR(ma64_buf + OFFSET(maple_arange_64_slot) + ++ sizeof(void *) * i)); ++} ++ ++static void dump_mt_entry(ulong entry, ulong min, ulong max, uint depth) ++{ ++ 
mt_dump_range(min, max, depth); ++ ++ if (xa_is_value(entry)) ++ fprintf(fp, "value %ld (0x%lx) [0x%lx]\n", xa_to_value(entry), ++ xa_to_value(entry), entry); ++ else if (xa_is_zero(entry)) ++ fprintf(fp, "zero (%ld)\n", xa_to_internal(entry)); ++ else if (mt_is_reserved(entry)) ++ fprintf(fp, "UNKNOWN ENTRY (0x%lx)\n", entry); ++ else ++ fprintf(fp, "0x%lx\n", entry); ++} ++ ++static void dump_mt_node(ulong maple_node, char *node_data, uint type, ++ ulong min, ulong max, uint depth) ++{ ++ mt_dump_range(min, max, depth); ++ ++ fprintf(fp, "node 0x%lx depth %d type %d parent %p", ++ maple_node, depth, type, ++ maple_node ? VOID_PTR(node_data + OFFSET(maple_node_parent)) : ++ NULL); ++} ++ ++static void do_mt_range64(ulong entry, ulong min, ulong max, ++ uint depth, char *path, ulong *global_index, ++ struct maple_tree_ops *ops) ++{ ++ ulong maple_node_m_node = mte_to_node(entry); ++ char node_buf[MAPLE_BUFSIZE]; ++ bool leaf = mte_is_leaf(entry); ++ ulong first = min, last; ++ int i; ++ int len = strlen(path); ++ struct tree_data *td = ops->is_td ? (struct tree_data *)ops->private : NULL; ++ char *mr64_buf; ++ ++ if (SIZE(maple_node) > MAPLE_BUFSIZE) ++ error(FATAL, "MAPLE_BUFSIZE should be larger than maple_node struct"); ++ ++ readmem(maple_node_m_node, KVADDR, node_buf, SIZE(maple_node), ++ "mt_dump_range64 read maple_node", FAULT_ON_ERROR); ++ ++ mr64_buf = node_buf + OFFSET(maple_node_mr64); ++ ++ for (i = 0; i < mt_slots[maple_range_64]; i++) { ++ last = max; ++ ++ if (i < (mt_slots[maple_range_64] - 1)) ++ last = ULONG(mr64_buf + OFFSET(maple_range_64_pivot) + ++ sizeof(ulong) * i); ++ ++ else if (!VOID_PTR(mr64_buf + OFFSET(maple_range_64_slot) + ++ sizeof(void *) * i) && ++ max != mt_max[mte_node_type(entry)]) ++ break; ++ if (last == 0 && i > 0) ++ break; ++ if (leaf) ++ do_mt_entry(mt_slot((void **)(mr64_buf + ++ OFFSET(maple_range_64_slot)), i), ++ first, last, depth + 1, i, path, global_index, ops); ++ else if (VOID_PTR(mr64_buf + OFFSET(maple_range_64_slot) + ++ sizeof(void *) * i)) { ++ sprintf(path + len, "/%d", i); ++ do_mt_node(mt_slot((void **)(mr64_buf + ++ OFFSET(maple_range_64_slot)), i), ++ first, last, depth + 1, path, global_index, ops); ++ } ++ ++ if (last == max) ++ break; ++ if (last > max) { ++ fprintf(fp, "node %p last (%lu) > max (%lu) at pivot %d!\n", ++ mr64_buf, last, max, i); ++ break; ++ } ++ first = last + 1; ++ } ++} ++ ++static void do_mt_arange64(ulong entry, ulong min, ulong max, ++ uint depth, char *path, ulong *global_index, ++ struct maple_tree_ops *ops) ++{ ++ ulong maple_node_m_node = mte_to_node(entry); ++ char node_buf[MAPLE_BUFSIZE]; ++ bool leaf = mte_is_leaf(entry); ++ ulong first = min, last; ++ int i; ++ int len = strlen(path); ++ struct tree_data *td = ops->is_td ? 
(struct tree_data *)ops->private : NULL; ++ char *ma64_buf; ++ ++ if (SIZE(maple_node) > MAPLE_BUFSIZE) ++ error(FATAL, "MAPLE_BUFSIZE should be larger than maple_node struct"); ++ ++ readmem(maple_node_m_node, KVADDR, node_buf, SIZE(maple_node), ++ "mt_dump_arange64 read maple_node", FAULT_ON_ERROR); ++ ++ ma64_buf = node_buf + OFFSET(maple_node_ma64); ++ ++ for (i = 0; i < mt_slots[maple_arange_64]; i++) { ++ last = max; ++ ++ if (i < (mt_slots[maple_arange_64] - 1)) ++ last = ULONG(ma64_buf + OFFSET(maple_arange_64_pivot) + ++ sizeof(ulong) * i); ++ else if (!VOID_PTR(ma64_buf + OFFSET(maple_arange_64_slot) + ++ sizeof(void *) * i)) ++ break; ++ if (last == 0 && i > 0) ++ break; ++ ++ if (leaf) ++ do_mt_entry(mt_slot((void **)(ma64_buf + ++ OFFSET(maple_arange_64_slot)), i), ++ first, last, depth + 1, i, path, global_index, ops); ++ else if (VOID_PTR(ma64_buf + OFFSET(maple_arange_64_slot) + ++ sizeof(void *) * i)) { ++ sprintf(path + len, "/%d", i); ++ do_mt_node(mt_slot((void **)(ma64_buf + ++ OFFSET(maple_arange_64_slot)), i), ++ first, last, depth + 1, path, global_index, ops); ++ } ++ ++ if (last == max) ++ break; ++ if (last > max) { ++ fprintf(fp, "node %p last (%lu) > max (%lu) at pivot %d!\n", ++ ma64_buf, last, max, i); ++ break; ++ } ++ first = last + 1; ++ } ++} ++ ++static void do_mt_entry(ulong entry, ulong min, ulong max, uint depth, ++ uint index, char *path, ulong *global_index, ++ struct maple_tree_ops *ops) ++{ ++ int print_radix = 0, i; ++ static struct req_entry **e = NULL; ++ struct tree_data *td = ops->is_td ? (struct tree_data *)ops->private : NULL; ++ ++ if (!td) ++ return; ++} ++ ++static void do_mt_node(ulong entry, ulong min, ulong max, ++ uint depth, char *path, ulong *global_index, ++ struct maple_tree_ops *ops) ++{ ++ ulong maple_node = mte_to_node(entry); ++ uint type = mte_node_type(entry); ++ uint i; ++ char node_buf[MAPLE_BUFSIZE]; ++ struct tree_data *td = ops->is_td ? (struct tree_data *)ops->private : NULL; ++ ++ if (SIZE(maple_node) > MAPLE_BUFSIZE) ++ error(FATAL, "MAPLE_BUFSIZE should be larger than maple_node struct"); ++ ++ readmem(maple_node, KVADDR, node_buf, SIZE(maple_node), ++ "mt_dump_node read maple_node", FAULT_ON_ERROR); ++ ++ switch (type) { ++ case maple_dense: ++ for (i = 0; i < mt_slots[maple_dense]; i++) { ++ if (min + i > max) ++ fprintf(fp, "OUT OF RANGE: "); ++ do_mt_entry(mt_slot((void **)(node_buf + OFFSET(maple_node_slot)), i), ++ min + i, min + i, depth, i, path, global_index, ops); ++ } ++ break; ++ case maple_leaf_64: ++ case maple_range_64: ++ do_mt_range64(entry, min, max, depth, path, global_index, ops); ++ break; ++ case maple_arange_64: ++ do_mt_arange64(entry, min, max, depth, path, global_index, ops); ++ break; ++ default: ++ fprintf(fp, " UNKNOWN TYPE\n"); ++ } ++} ++ ++static int do_maple_tree_traverse(ulong ptr, int is_root, ++ struct maple_tree_ops *ops) ++{ ++ char path[BUFSIZE] = {0}; ++ char tree_buf[MAPLE_BUFSIZE]; ++ ulong entry; ++ struct tree_data *td = ops->is_td ? 
(struct tree_data *)ops->private : NULL; ++ ulong global_index = 0; ++ ++ if (SIZE(maple_tree) > MAPLE_BUFSIZE) ++ error(FATAL, "MAPLE_BUFSIZE should be larger than maple_tree struct"); ++ ++ if (!is_root) { ++ strcpy(path, "direct"); ++ do_mt_node(ptr, 0, mt_max[mte_node_type(ptr)], ++ 0, path, &global_index, ops); ++ } else { ++ readmem(ptr, KVADDR, tree_buf, SIZE(maple_tree), ++ "mt_dump read maple_tree", FAULT_ON_ERROR); ++ entry = ULONG(tree_buf + OFFSET(maple_tree_ma_root)); ++ ++ if (!xa_is_node(entry)) ++ do_mt_entry(entry, 0, 0, 0, 0, path, &global_index, ops); ++ else if (entry) { ++ strcpy(path, "root"); ++ do_mt_node(entry, 0, mt_max[mte_node_type(entry)], 0, ++ path, &global_index, ops); ++ } ++ } ++ return 0; ++} ++ ++int do_mptree(struct tree_data *td) ++{ ++ struct maple_tree_ops ops = { ++ .entry = NULL, ++ .private = td, ++ .is_td = true, ++ }; ++ ++ int is_root = !(td->flags & TREE_NODE_POINTER); ++ ++ do_maple_tree_traverse(td->start, is_root, &ops); ++ ++ return 0; ++} ++ ++/***********************************************/ ++void maple_init(void) ++{ ++ int array_len; ++ ++ STRUCT_SIZE_INIT(maple_tree, "maple_tree"); ++ STRUCT_SIZE_INIT(maple_node, "maple_node"); ++ ++ MEMBER_OFFSET_INIT(maple_tree_ma_root, "maple_tree", "ma_root"); ++ MEMBER_OFFSET_INIT(maple_tree_ma_flags, "maple_tree", "ma_flags"); ++ ++ MEMBER_OFFSET_INIT(maple_node_parent, "maple_node", "parent"); ++ MEMBER_OFFSET_INIT(maple_node_ma64, "maple_node", "ma64"); ++ MEMBER_OFFSET_INIT(maple_node_mr64, "maple_node", "mr64"); ++ MEMBER_OFFSET_INIT(maple_node_slot, "maple_node", "slot"); ++ ++ MEMBER_OFFSET_INIT(maple_arange_64_pivot, "maple_arange_64", "pivot"); ++ MEMBER_OFFSET_INIT(maple_arange_64_slot, "maple_arange_64", "slot"); ++ MEMBER_OFFSET_INIT(maple_arange_64_gap, "maple_arange_64", "gap"); ++ MEMBER_OFFSET_INIT(maple_arange_64_meta, "maple_arange_64", "meta"); ++ ++ MEMBER_OFFSET_INIT(maple_range_64_pivot, "maple_range_64", "pivot"); ++ MEMBER_OFFSET_INIT(maple_range_64_slot, "maple_range_64", "slot"); ++ ++ MEMBER_OFFSET_INIT(maple_metadata_end, "maple_metadata", "end"); ++ MEMBER_OFFSET_INIT(maple_metadata_gap, "maple_metadata", "gap"); ++ ++ array_len = get_array_length("mt_slots", NULL, sizeof(char)); ++ mt_slots = calloc(array_len, sizeof(char)); ++ readmem(symbol_value("mt_slots"), KVADDR, mt_slots, ++ array_len * sizeof(char), "maple_init read mt_slots", ++ RETURN_ON_ERROR); ++ ++ array_len = get_array_length("mt_pivots", NULL, sizeof(char)); ++ mt_pivots = calloc(array_len, sizeof(char)); ++ readmem(symbol_value("mt_pivots"), KVADDR, mt_pivots, ++ array_len * sizeof(char), "maple_init read mt_pivots", ++ RETURN_ON_ERROR); ++ ++ mt_max[maple_dense] = mt_slots[maple_dense]; ++ mt_max[maple_leaf_64] = ULONG_MAX; ++ mt_max[maple_range_64] = ULONG_MAX; ++ mt_max[maple_arange_64] = ULONG_MAX; ++} +Index: crash-8.0.2/maple_tree.h +=================================================================== +--- /dev/null ++++ crash-8.0.2/maple_tree.h +@@ -0,0 +1,82 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++#ifndef _MAPLE_TREE_H ++#define _MAPLE_TREE_H ++/* ++ * Maple Tree - An RCU-safe adaptive tree for storing ranges ++ * Copyright (c) 2018-2022 Oracle ++ * Authors: Liam R. Howlett ++ * Matthew Wilcox ++ * ++ * eXtensible Arrays ++ * Copyright (c) 2017 Microsoft Corporation ++ * Author: Matthew Wilcox ++ * ++ * See Documentation/core-api/xarray.rst for how to use the XArray. 
++ */ ++#include ++#include ++#include ++ ++/* ++ * The following are copied and modified from include/linux/maple_tree.h ++ */ ++ ++enum maple_type { ++ maple_dense, ++ maple_leaf_64, ++ maple_range_64, ++ maple_arange_64, ++}; ++ ++#define MAPLE_NODE_MASK 255UL ++ ++#define MT_FLAGS_HEIGHT_OFFSET 0x02 ++#define MT_FLAGS_HEIGHT_MASK 0x7C ++ ++#define MAPLE_NODE_TYPE_MASK 0x0F ++#define MAPLE_NODE_TYPE_SHIFT 0x03 ++ ++#define MAPLE_RESERVED_RANGE 4096 ++ ++/* ++ * The following are copied and modified from include/linux/xarray.h ++ */ ++ ++#define XA_ZERO_ENTRY xa_mk_internal(257) ++ ++static inline ulong xa_mk_internal(ulong v) ++{ ++ return (v << 2) | 2; ++} ++ ++static inline bool xa_is_internal(ulong entry) ++{ ++ return (entry & 3) == 2; ++} ++ ++static inline bool xa_is_node(ulong entry) ++{ ++ return xa_is_internal(entry) && entry > 4096; ++} ++ ++static inline bool xa_is_value(ulong entry) ++{ ++ return entry & 1; ++} ++ ++static inline bool xa_is_zero(ulong entry) ++{ ++ return entry == XA_ZERO_ENTRY; ++} ++ ++static inline unsigned long xa_to_internal(ulong entry) ++{ ++ return entry >> 2; ++} ++ ++static inline unsigned long xa_to_value(ulong entry) ++{ ++ return entry >> 1; ++} ++ ++#endif /* _MAPLE_TREE_H */ +Index: crash-8.0.2/Makefile +=================================================================== +--- crash-8.0.2.orig/Makefile ++++ crash-8.0.2/Makefile +@@ -59,6 +59,7 @@ IBM_HFILES=ibm_common.h + SADUMP_HFILES=sadump.h + UNWIND_HFILES=unwind.h unwind_i.h rse.h unwind_x86.h unwind_x86_64.h + VMWARE_HFILES=vmware_vmss.h ++MAPLE_TREE_HFILES=maple_tree.h + + CFILES=main.c tools.c global_data.c memory.c filesys.c help.c task.c \ + kernel.c test.c gdb_interface.c configure.c net.c dev.c bpf.c \ +@@ -73,12 +74,12 @@ CFILES=main.c tools.c global_data.c memo + xen_hyper.c xen_hyper_command.c xen_hyper_global_data.c \ + xen_hyper_dump_tables.c kvmdump.c qemu.c qemu-load.c sadump.c ipcs.c \ + ramdump.c vmware_vmss.c vmware_guestdump.c \ +- xen_dom0.c kaslr_helper.c sbitmap.c ++ xen_dom0.c kaslr_helper.c sbitmap.c maple_tree.c + + SOURCE_FILES=${CFILES} ${GENERIC_HFILES} ${MCORE_HFILES} \ + ${REDHAT_CFILES} ${REDHAT_HFILES} ${UNWIND_HFILES} \ + ${LKCD_DUMP_HFILES} ${LKCD_TRACE_HFILES} ${LKCD_OBSOLETE_HFILES}\ +- ${IBM_HFILES} ${SADUMP_HFILES} ${VMWARE_HFILES} ++ ${IBM_HFILES} ${SADUMP_HFILES} ${VMWARE_HFILES} ${MAPLE_TREE_HFILES} + + OBJECT_FILES=main.o tools.o global_data.o memory.o filesys.o help.o task.o \ + build_data.o kernel.o test.o gdb_interface.o net.o dev.o bpf.o \ +@@ -93,7 +94,7 @@ OBJECT_FILES=main.o tools.o global_data. 
+ xen_hyper.o xen_hyper_command.o xen_hyper_global_data.o \ + xen_hyper_dump_tables.o kvmdump.o qemu.o qemu-load.o sadump.o ipcs.o \ + ramdump.o vmware_vmss.o vmware_guestdump.o \ +- xen_dom0.o kaslr_helper.o sbitmap.o ++ xen_dom0.o kaslr_helper.o sbitmap.o maple_tree.o + + MEMORY_DRIVER_FILES=memory_driver/Makefile memory_driver/crash.c memory_driver/README + +@@ -536,6 +537,9 @@ kaslr_helper.o: ${GENERIC_HFILES} kaslr_ + bpf.o: ${GENERIC_HFILES} bpf.c + ${CC} -c ${CRASH_CFLAGS} bpf.c ${WARNING_OPTIONS} ${WARNING_ERROR} + ++maple_tree.o: ${GENERIC_HFILES} ${MAPLE_TREE_HFILES} maple_tree.c ++ ${CC} -c ${CRASH_CFLAGS} maple_tree.c ${WARNING_OPTIONS} ${WARNING_ERROR} ++ + ${PROGRAM}: force + @$(MAKE) all + diff -Nru crash-8.0.2/debian/patches/lp2038249-0009-Add-do_maple_tree-for-maple-tree-operations.patch crash-8.0.2/debian/patches/lp2038249-0009-Add-do_maple_tree-for-maple-tree-operations.patch --- crash-8.0.2/debian/patches/lp2038249-0009-Add-do_maple_tree-for-maple-tree-operations.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0009-Add-do_maple_tree-for-maple-tree-operations.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,195 @@ +From: Tao Liu +Date: Tue Jan 10 14:56:29 2023 +0800 +Subject: Add do_maple_tree() for maple tree operations + +do_maple_tree() is similar to do_radix_tree() and do_xarray(), which +takes the same do_maple_tree_traverse entry as tree command. + +Signed-off-by: Tao Liu + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/222176a0a6c14b6a1cdcebb8dda020ccb17b90f8 + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -5580,6 +5580,12 @@ int cleanup_memory_driver(void); + + void maple_init(void); + int do_mptree(struct tree_data *); ++ulong do_maple_tree(ulong, int, struct list_pair *); ++#define MAPLE_TREE_COUNT (1) ++#define MAPLE_TREE_SEARCH (2) ++#define MAPLE_TREE_DUMP (3) ++#define MAPLE_TREE_GATHER (4) ++#define MAPLE_TREE_DUMP_CB (5) + + /* + * help.c +--- crash-8.0.2.orig/maple_tree.c ++++ crash-8.0.2/maple_tree.c +@@ -40,6 +40,12 @@ static inline bool ma_is_leaf(const enum + + /*************** For cmd_tree ********************/ + ++struct do_maple_tree_info { ++ ulong maxcount; ++ ulong count; ++ void *data; ++}; ++ + struct maple_tree_ops { + void (*entry)(ulong node, ulong slot, const char *path, + ulong index, void *private); +@@ -273,6 +279,9 @@ static void do_mt_entry(ulong entry, ulo + static struct req_entry **e = NULL; + struct tree_data *td = ops->is_td ? 
(struct tree_data *)ops->private : NULL; + ++ if (ops->entry) ++ ops->entry(entry, entry, path, max, ops->private); ++ + if (!td) + return; + } +@@ -361,6 +370,142 @@ int do_mptree(struct tree_data *td) + return 0; + } + ++/************* For do_maple_tree *****************/ ++static void do_maple_tree_count(ulong node, ulong slot, const char *path, ++ ulong index, void *private) ++{ ++ struct do_maple_tree_info *info = private; ++ info->count++; ++} ++ ++static void do_maple_tree_search(ulong node, ulong slot, const char *path, ++ ulong index, void *private) ++{ ++ struct do_maple_tree_info *info = private; ++ struct list_pair *lp = info->data; ++ ++ if (lp->index == index) { ++ lp->value = (void *)slot; ++ info->count = 1; ++ } ++} ++ ++static void do_maple_tree_dump(ulong node, ulong slot, const char *path, ++ ulong index, void *private) ++{ ++ struct do_maple_tree_info *info = private; ++ fprintf(fp, "[%lu] %lx\n", index, slot); ++ info->count++; ++} ++ ++static void do_maple_tree_gather(ulong node, ulong slot, const char *path, ++ ulong index, void *private) ++{ ++ struct do_maple_tree_info *info = private; ++ struct list_pair *lp = info->data; ++ ++ if (info->maxcount) { ++ lp[info->count].index = index; ++ lp[info->count].value = (void *)slot; ++ ++ info->count++; ++ info->maxcount--; ++ } ++} ++ ++static void do_maple_tree_dump_cb(ulong node, ulong slot, const char *path, ++ ulong index, void *private) ++{ ++ struct do_maple_tree_info *info = private; ++ struct list_pair *lp = info->data; ++ int (*cb)(ulong) = lp->value; ++ ++ /* Caller defined operation */ ++ if (!cb(slot)) { ++ error(FATAL, "do_maple_tree: callback " ++ "operation failed: entry: %ld item: %lx\n", ++ info->count, slot); ++ } ++ info->count++; ++} ++ ++/* ++ * do_maple_tree argument usage: ++ * ++ * root: Address of a maple_tree_root structure ++ * ++ * flag: MAPLE_TREE_COUNT - Return the number of entries in the tree. ++ * MAPLE_TREE_SEARCH - Search for an entry at lp->index; if found, ++ * store the entry in lp->value and return a count of 1; otherwise ++ * return a count of 0. ++ * MAPLE_TREE_DUMP - Dump all existing index/value pairs. ++ * MAPLE_TREE_GATHER - Store all existing index/value pairs in the ++ * passed-in array of list_pair structs starting at lp, ++ * returning the count of entries stored; the caller can/should ++ * limit the number of returned entries by putting the array size ++ * (max count) in the lp->index field of the first structure ++ * in the passed-in array. ++ * MAPLE_TREE_DUMP_CB - Similar with MAPLE_TREE_DUMP, but for each ++ * maple tree entry, a user defined callback at lp->value will ++ * be invoked. ++ * ++ * lp: Unused by MAPLE_TREE_COUNT and MAPLE_TREE_DUMP. ++ * A pointer to a list_pair structure for MAPLE_TREE_SEARCH. ++ * A pointer to an array of list_pair structures for ++ * MAPLE_TREE_GATHER; the dimension (max count) of the array may ++ * be stored in the index field of the first structure to avoid ++ * any chance of an overrun. ++ * For MAPLE_TREE_DUMP_CB, the lp->value must be initialized as a ++ * callback function. 
The callback prototype must be: int (*)(ulong);
++ */
++ulong
++do_maple_tree(ulong root, int flag, struct list_pair *lp)
++{
++	struct do_maple_tree_info info = {
++		.count = 0,
++		.data = lp,
++	};
++	struct maple_tree_ops ops = {
++		.private = &info,
++		.is_td = false,
++	};
++
++	switch (flag)
++	{
++	case MAPLE_TREE_COUNT:
++		ops.entry = do_maple_tree_count;
++		break;
++
++	case MAPLE_TREE_SEARCH:
++		ops.entry = do_maple_tree_search;
++		break;
++
++	case MAPLE_TREE_DUMP:
++		ops.entry = do_maple_tree_dump;
++		break;
++
++	case MAPLE_TREE_GATHER:
++		if (!(info.maxcount = lp->index))
++			info.maxcount = (ulong)(-1);	/* caller beware */
++
++		ops.entry = do_maple_tree_gather;
++		break;
++
++	case MAPLE_TREE_DUMP_CB:
++		if (lp->value == NULL) {
++			error(FATAL, "do_maple_tree: need set callback function");
++		}
++		ops.entry = do_maple_tree_dump_cb;
++		break;
++
++	default:
++		error(FATAL, "do_maple_tree: invalid flag: %lx\n", flag);
++	}
++
++	do_maple_tree_traverse(root, true, &ops);
++	return info.count;
++}
++
+ /***********************************************/
+ void maple_init(void)
+ {
diff -Nru crash-8.0.2/debian/patches/lp2038249-0010-Introduce-maple-tree-vma-iteration-to-vm_area_dump.patch crash-8.0.2/debian/patches/lp2038249-0010-Introduce-maple-tree-vma-iteration-to-vm_area_dump.patch
--- crash-8.0.2/debian/patches/lp2038249-0010-Introduce-maple-tree-vma-iteration-to-vm_area_dump.patch	1970-01-01 00:00:00.000000000 +0000
+++ crash-8.0.2/debian/patches/lp2038249-0010-Introduce-maple-tree-vma-iteration-to-vm_area_dump.patch	2024-01-04 06:47:25.000000000 +0000
@@ -0,0 +1,449 @@
+From: Tao Liu
+Date: Tue Jan 10 14:56:30 2023 +0800
+Subject: Introduce maple tree vma iteration to vm_area_dump()
+
+Since memory.c:vm_area_dump() iterates over all vmas, this patch
+introduces maple tree vma iteration to it.
+
+We extract the code which handles each vma into a function. If
+mm_struct_mmap exists, i.e. the vma linked list is available, we take
+the original path; otherwise, if mm_struct_mm_mt exists, i.e. a maple
+tree is available, we take the maple tree vma iteration, as sketched
+below.
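+
+A minimal sketch of the resulting dispatch (not the literal hunk;
+"mm", "vma", "vm_next", "args", "lp" and each_vma_cb() stand in for
+the real locals, and the callback wired through lp.value feeds each
+maple tree entry to the same handler):
+
+  if (VALID_MEMBER(mm_struct_mmap)) {
+          /* pre-6.1 kernels: walk the vma.vm_next linked list */
+          for (; vma; vma = vm_next)
+                  handle_each_vm_area(&args);
+  } else if (VALID_MEMBER(mm_struct_mm_mt)) {
+          /* 6.1+: iterate the maple tree rooted at mm_struct.mm_mt,
+           * invoking an int (*)(ulong) callback per stored vma */
+          lp.value = (void *)each_vma_cb;	/* hypothetical helper */
+          do_maple_tree(mm + OFFSET(mm_struct_mm_mt),
+                        MAPLE_TREE_DUMP_CB, &lp);
+  }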
+ +Signed-off-by: Tao Liu + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/9efc1f68a44f6fe521e64efe4a3dc36e9ba0bbc1 + +Index: crash-8.0.2/memory.c +=================================================================== +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include "maple_tree.h" + + struct meminfo { /* general purpose memory information structure */ + ulong cache; /* used by the various memory searching/dumping */ +@@ -137,6 +138,27 @@ struct searchinfo { + char buf[BUFSIZE]; + }; + ++struct handle_each_vm_area_args { ++ ulong task; ++ ulong flag; ++ ulong vaddr; ++ struct reference *ref; ++ char *vma_header; ++ char *buf1; ++ char *buf2; ++ char *buf3; ++ char *buf4; ++ char *buf5; ++ ulong vma; ++ char **vma_buf; ++ struct task_mem_usage *tm; ++ int *found; ++ int *single_vma_found; ++ unsigned int radix; ++ struct task_context *tc; ++ ulong *single_vma; ++}; ++ + static char *memtype_string(int, int); + static char *error_handle_string(ulong); + static void collect_page_member_data(char *, struct meminfo *); +@@ -299,6 +321,7 @@ static void dump_page_flags(ulonglong); + static ulong kmem_cache_nodelists(ulong); + static void dump_hstates(void); + static ulong freelist_ptr(struct meminfo *, ulong, ulong); ++static ulong handle_each_vm_area(struct handle_each_vm_area_args *); + + /* + * Memory display modes specific to this file. +@@ -363,6 +386,10 @@ vm_init(void) + + MEMBER_OFFSET_INIT(task_struct_mm, "task_struct", "mm"); + MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap"); ++ MEMBER_OFFSET_INIT(mm_struct_mm_mt, "mm_struct", "mm_mt"); ++ if (VALID_MEMBER(mm_struct_mm_mt)) { ++ maple_init(); ++ } + MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); + MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss"); + if (!VALID_MEMBER(mm_struct_rss)) +@@ -3874,7 +3901,7 @@ bailout: + * for references -- and only then does a display + */ + +-#define PRINT_VM_DATA() \ ++#define PRINT_VM_DATA(buf4, buf5, tm) \ + { \ + fprintf(fp, "%s %s ", \ + mkstring(buf4, VADDR_PRLEN, CENTER|LJUST, "MM"), \ +@@ -3896,9 +3923,9 @@ bailout: + mkstring(buf5, 8, CENTER|LJUST, NULL)); \ + } + +-#define PRINT_VMA_DATA() \ ++#define PRINT_VMA_DATA(buf1, buf2, buf3, buf4, vma) \ + fprintf(fp, "%s%s%s%s%s %6llx%s%s\n", \ +- mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX, MKSTR(vma)), \ ++ mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX, MKSTR(vma)),\ + space(MINSPACE), \ + mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_start)), \ + space(MINSPACE), \ +@@ -3925,18 +3952,137 @@ bailout: + (DO_REF_SEARCH(X) && (string_exists(S)) && FILENAME_COMPONENT((S),(X)->str)) + #define VM_REF_FOUND(X) ((X) && ((X)->cmdflags & VM_REF_HEADER)) + +-ulong +-vm_area_dump(ulong task, ulong flag, ulong vaddr, struct reference *ref) ++static ulong handle_each_vm_area(struct handle_each_vm_area_args *args) + { +- struct task_context *tc; +- ulong vma; ++ char *dentry_buf, *file_buf; + ulong vm_start; + ulong vm_end; +- ulong vm_next, vm_mm; +- char *dentry_buf, *vma_buf, *file_buf; ++ ulong vm_mm; + ulonglong vm_flags; + ulong vm_file, inode; + ulong dentry, vfsmnt; ++ ++ if ((args->flag & PHYSADDR) && !DO_REF_SEARCH(args->ref)) ++ fprintf(fp, "%s", args->vma_header); ++ ++ inode = 0; ++ BZERO(args->buf1, BUFSIZE); ++ *(args->vma_buf) = fill_vma_cache(args->vma); ++ ++ vm_mm = ULONG(*(args->vma_buf) + OFFSET(vm_area_struct_vm_mm)); ++ vm_end = ULONG(*(args->vma_buf) + 
OFFSET(vm_area_struct_vm_end)); ++ vm_start = ULONG(*(args->vma_buf) + OFFSET(vm_area_struct_vm_start)); ++ vm_flags = get_vm_flags(*(args->vma_buf)); ++ vm_file = ULONG(*(args->vma_buf) + OFFSET(vm_area_struct_vm_file)); ++ ++ if (args->flag & PRINT_SINGLE_VMA) { ++ if (args->vma != *(args->single_vma)) ++ return 0; ++ fprintf(fp, "%s", args->vma_header); ++ *(args->single_vma_found) = TRUE; ++ } ++ ++ if (args->flag & PRINT_VMA_STRUCTS) { ++ dump_struct("vm_area_struct", args->vma, args->radix); ++ return 0; ++ } ++ ++ if (vm_file && !(args->flag & VERIFY_ADDR)) { ++ file_buf = fill_file_cache(vm_file); ++ dentry = ULONG(file_buf + OFFSET(file_f_dentry)); ++ dentry_buf = NULL; ++ if (dentry) { ++ dentry_buf = fill_dentry_cache(dentry); ++ if (VALID_MEMBER(file_f_vfsmnt)) { ++ vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); ++ get_pathname(dentry, args->buf1, BUFSIZE, 1, vfsmnt); ++ } else ++ get_pathname(dentry, args->buf1, BUFSIZE, 1, 0); ++ } ++ if ((args->flag & PRINT_INODES) && dentry) ++ inode = ULONG(dentry_buf + OFFSET(dentry_d_inode)); ++ } ++ ++ if (!(args->flag & UVADDR) || ((args->flag & UVADDR) && ++ ((args->vaddr >= vm_start) && (args->vaddr < vm_end)))) { ++ *(args->found) = TRUE; ++ ++ if (args->flag & VERIFY_ADDR) ++ return args->vma; ++ ++ if (DO_REF_SEARCH(args->ref)) { ++ if (VM_REF_CHECK_HEXVAL(args->ref, args->vma) || ++ VM_REF_CHECK_HEXVAL(args->ref, (ulong)vm_flags) || ++ VM_REF_CHECK_STRING(args->ref, args->buf1)) { ++ if (!(args->ref->cmdflags & VM_REF_HEADER)) { ++ print_task_header(fp, args->tc, 0); ++ PRINT_VM_DATA(args->buf4, args->buf5, args->tm); ++ args->ref->cmdflags |= VM_REF_HEADER; ++ } ++ if (!(args->ref->cmdflags & VM_REF_VMA) || ++ (args->ref->cmdflags & VM_REF_PAGE)) { ++ fprintf(fp, "%s", args->vma_header); ++ args->ref->cmdflags |= VM_REF_VMA; ++ args->ref->cmdflags &= ~VM_REF_PAGE; ++ args->ref->ref1 = args->vma; ++ } ++ PRINT_VMA_DATA(args->buf1, args->buf2, ++ args->buf3, args->buf4, args->vma); ++ } ++ ++ if (vm_area_page_dump(args->vma, args->task, ++ vm_start, vm_end, vm_mm, args->ref)) { ++ if (!(args->ref->cmdflags & VM_REF_HEADER)) { ++ print_task_header(fp, args->tc, 0); ++ PRINT_VM_DATA(args->buf4, args->buf5, args->tm); ++ args->ref->cmdflags |= VM_REF_HEADER; ++ } ++ if (!(args->ref->cmdflags & VM_REF_VMA) || ++ (args->ref->ref1 != args->vma)) { ++ fprintf(fp, "%s", args->vma_header); ++ PRINT_VMA_DATA(args->buf1, args->buf2, ++ args->buf3, args->buf4, args->vma); ++ args->ref->cmdflags |= VM_REF_VMA; ++ args->ref->ref1 = args->vma; ++ } ++ ++ args->ref->cmdflags |= VM_REF_DISPLAY; ++ vm_area_page_dump(args->vma, args->task, ++ vm_start, vm_end, vm_mm, args->ref); ++ args->ref->cmdflags &= ~VM_REF_DISPLAY; ++ } ++ ++ return 0; ++ } ++ ++ if (inode) { ++ fprintf(fp, "%lx%s%s%s%s%s%6llx%s%lx %s\n", ++ args->vma, space(MINSPACE), ++ mkstring(args->buf2, UVADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(vm_start)), space(MINSPACE), ++ mkstring(args->buf3, UVADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(vm_end)), space(MINSPACE), ++ vm_flags, space(MINSPACE), inode, args->buf1); ++ } else { ++ PRINT_VMA_DATA(args->buf1, args->buf2, ++ args->buf3, args->buf4, args->vma); ++ ++ if (args->flag & (PHYSADDR|PRINT_SINGLE_VMA)) ++ vm_area_page_dump(args->vma, args->task, ++ vm_start, vm_end, vm_mm, args->ref); ++ } ++ ++ if (args->flag & UVADDR) ++ return args->vma; ++ } ++ return 0; ++} ++ ++ulong ++vm_area_dump(ulong task, ulong flag, ulong vaddr, struct reference *ref) ++{ ++ struct task_context *tc; ++ ulong vma; + ulong single_vma; + unsigned int radix; + 
int single_vma_found; +@@ -3948,6 +4094,10 @@ vm_area_dump(ulong task, ulong flag, ulo + char buf4[BUFSIZE]; + char buf5[BUFSIZE]; + char vma_header[BUFSIZE]; ++ char *vma_buf; ++ int i; ++ ulong mm_mt, entry_num; ++ struct list_pair *entry_list; + + tc = task_to_context(task); + tm = &task_mem_usage; +@@ -3981,14 +4131,14 @@ vm_area_dump(ulong task, ulong flag, ulo + if (VM_REF_CHECK_HEXVAL(ref, tm->mm_struct_addr) || + VM_REF_CHECK_HEXVAL(ref, tm->pgd_addr)) { + print_task_header(fp, tc, 0); +- PRINT_VM_DATA(); ++ PRINT_VM_DATA(buf4, buf5, tm); + fprintf(fp, "\n"); + return (ulong)NULL; + } + + if (!(flag & (UVADDR|PRINT_MM_STRUCT|PRINT_VMA_STRUCTS|PRINT_SINGLE_VMA)) && + !DO_REF_SEARCH(ref)) +- PRINT_VM_DATA(); ++ PRINT_VM_DATA(buf4, buf5, tm); + + if (!tm->mm_struct_addr) { + if (pc->curcmd_flags & MM_STRUCT_FORCE) { +@@ -4012,9 +4162,6 @@ vm_area_dump(ulong task, ulong flag, ulo + return (ulong)NULL; + } + +- readmem(tm->mm_struct_addr + OFFSET(mm_struct_mmap), KVADDR, +- &vma, sizeof(void *), "mm_struct mmap", FAULT_ON_ERROR); +- + sprintf(vma_header, "%s%s%s%s%s FLAGS%sFILE\n", + mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "VMA"), + space(MINSPACE), +@@ -4027,125 +4174,41 @@ vm_area_dump(ulong task, ulong flag, ulo + !DO_REF_SEARCH(ref)) + fprintf(fp, "%s", vma_header); + +- for (found = FALSE; vma; vma = vm_next) { +- +- if ((flag & PHYSADDR) && !DO_REF_SEARCH(ref)) +- fprintf(fp, "%s", vma_header); +- +- inode = 0; +- BZERO(buf1, BUFSIZE); +- vma_buf = fill_vma_cache(vma); +- +- vm_mm = ULONG(vma_buf + OFFSET(vm_area_struct_vm_mm)); +- vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); +- vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next)); +- vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); +- vm_flags = get_vm_flags(vma_buf); +- vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); +- +- if (flag & PRINT_SINGLE_VMA) { +- if (vma != single_vma) +- continue; +- fprintf(fp, "%s", vma_header); +- single_vma_found = TRUE; +- } +- +- if (flag & PRINT_VMA_STRUCTS) { +- dump_struct("vm_area_struct", vma, radix); +- continue; +- } ++ found = FALSE; + +- if (vm_file && !(flag & VERIFY_ADDR)) { +- file_buf = fill_file_cache(vm_file); +- dentry = ULONG(file_buf + OFFSET(file_f_dentry)); +- dentry_buf = NULL; +- if (dentry) { +- dentry_buf = fill_dentry_cache(dentry); +- if (VALID_MEMBER(file_f_vfsmnt)) { +- vfsmnt = ULONG(file_buf + +- OFFSET(file_f_vfsmnt)); +- get_pathname(dentry, buf1, BUFSIZE, +- 1, vfsmnt); +- } else { +- get_pathname(dentry, buf1, BUFSIZE, +- 1, 0); +- } +- } +- if ((flag & PRINT_INODES) && dentry) { +- inode = ULONG(dentry_buf + +- OFFSET(dentry_d_inode)); ++ struct handle_each_vm_area_args args = { ++ .task = task, .flag = flag, .vaddr = vaddr, ++ .ref = ref, .tc = tc, .radix = radix, ++ .tm = tm, .buf1 = buf1, .buf2 = buf2, ++ .buf3 = buf3, .buf4 = buf4, .buf5 = buf5, ++ .vma_header = vma_header, .single_vma = &single_vma, ++ .single_vma_found = &single_vma_found, .found = &found, ++ .vma_buf = &vma_buf, ++ }; ++ ++ if (INVALID_MEMBER(mm_struct_mmap) && VALID_MEMBER(mm_struct_mm_mt)) { ++ mm_mt = tm->mm_struct_addr + OFFSET(mm_struct_mm_mt); ++ entry_num = do_maple_tree(mm_mt, MAPLE_TREE_COUNT, NULL); ++ entry_list = (struct list_pair *)GETBUF(entry_num * sizeof(struct list_pair)); ++ do_maple_tree(mm_mt, MAPLE_TREE_GATHER, entry_list); ++ ++ for (i = 0; i < entry_num; i++) { ++ if (!!(args.vma = (ulong)entry_list[i].value) && ++ handle_each_vm_area(&args)) { ++ FREEBUF(entry_list); ++ return args.vma; + } + } +- +- if (!(flag & UVADDR) 
|| ((flag & UVADDR) && +- ((vaddr >= vm_start) && (vaddr < vm_end)))) { +- found = TRUE; +- +- if (flag & VERIFY_ADDR) +- return vma; +- +- if (DO_REF_SEARCH(ref)) { +- if (VM_REF_CHECK_HEXVAL(ref, vma) || +- VM_REF_CHECK_HEXVAL(ref, (ulong)vm_flags) || +- VM_REF_CHECK_STRING(ref, buf1)) { +- if (!(ref->cmdflags & VM_REF_HEADER)) { +- print_task_header(fp, tc, 0); +- PRINT_VM_DATA(); +- ref->cmdflags |= VM_REF_HEADER; +- } +- if (!(ref->cmdflags & VM_REF_VMA) || +- (ref->cmdflags & VM_REF_PAGE)) { +- fprintf(fp, "%s", vma_header); +- ref->cmdflags |= VM_REF_VMA; +- ref->cmdflags &= ~VM_REF_PAGE; +- ref->ref1 = vma; +- } +- PRINT_VMA_DATA(); +- } +- +- if (vm_area_page_dump(vma, task, +- vm_start, vm_end, vm_mm, ref)) { +- if (!(ref->cmdflags & VM_REF_HEADER)) { +- print_task_header(fp, tc, 0); +- PRINT_VM_DATA(); +- ref->cmdflags |= VM_REF_HEADER; +- } +- if (!(ref->cmdflags & VM_REF_VMA) || +- (ref->ref1 != vma)) { +- fprintf(fp, "%s", vma_header); +- PRINT_VMA_DATA(); +- ref->cmdflags |= VM_REF_VMA; +- ref->ref1 = vma; +- } +- +- ref->cmdflags |= VM_REF_DISPLAY; +- vm_area_page_dump(vma, task, +- vm_start, vm_end, vm_mm, ref); +- ref->cmdflags &= ~VM_REF_DISPLAY; +- } +- +- continue; +- } +- +- if (inode) { +- fprintf(fp, "%lx%s%s%s%s%s%6llx%s%lx %s\n", +- vma, space(MINSPACE), +- mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, +- MKSTR(vm_start)), space(MINSPACE), +- mkstring(buf3, UVADDR_PRLEN, RJUST|LONG_HEX, +- MKSTR(vm_end)), space(MINSPACE), +- vm_flags, space(MINSPACE), inode, buf1); +- } else { +- PRINT_VMA_DATA(); +- +- if (flag & (PHYSADDR|PRINT_SINGLE_VMA)) +- vm_area_page_dump(vma, task, +- vm_start, vm_end, vm_mm, ref); +- } +- +- if (flag & UVADDR) ++ FREEBUF(entry_list); ++ } else { ++ readmem(tm->mm_struct_addr + OFFSET(mm_struct_mmap), KVADDR, ++ &vma, sizeof(void *), "mm_struct mmap", FAULT_ON_ERROR); ++ while (vma) { ++ args.vma = vma; ++ if (handle_each_vm_area(&args)) + return vma; +- } ++ vma = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next)); ++ } + } + + if (flag & VERIFY_ADDR) +Index: crash-8.0.2/Makefile +=================================================================== +--- crash-8.0.2.orig/Makefile ++++ crash-8.0.2/Makefile +@@ -355,7 +355,7 @@ filesys.o: ${GENERIC_HFILES} filesys.c + help.o: ${GENERIC_HFILES} help.c + ${CC} -c ${CRASH_CFLAGS} help.c ${WARNING_OPTIONS} ${WARNING_ERROR} + +-memory.o: ${GENERIC_HFILES} memory.c ++memory.o: ${GENERIC_HFILES} ${MAPLE_TREE_HFILES} memory.c + ${CC} -c ${CRASH_CFLAGS} memory.c ${WARNING_OPTIONS} ${WARNING_ERROR} + + test.o: ${GENERIC_HFILES} test.c diff -Nru crash-8.0.2/debian/patches/lp2038249-0011-Dump-maple-tree-offset-variables-by-help-o.patch crash-8.0.2/debian/patches/lp2038249-0011-Dump-maple-tree-offset-variables-by-help-o.patch --- crash-8.0.2/debian/patches/lp2038249-0011-Dump-maple-tree-offset-variables-by-help-o.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0011-Dump-maple-tree-offset-variables-by-help-o.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,45 @@ +From: Tao Liu +Date: Tue Jan 10 14:56:32 2023 +0800 +Subject: Dump maple tree offset variables by "help -o" + +In the previous patches, some variables are added to offset_table and +size_table, print them out with "help -o" command. 
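+
+For example, with this change the "help -o" output is expected to
+include lines such as the following (offset and size values are
+illustrative and vary by kernel):
+
+    crash> help -o
+    ...
+    mm_struct_mm_mt: 40
+    maple_tree_ma_root: 8
+    maple_node_parent: 0
+    ...
+    maple_tree: 16
+    maple_node: 256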
+ +Signed-off-by: Tao Liu + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/46344aa2f92b07ded52cf9841f8db24dd7fe67d7 + +--- crash-8.0.2.orig/symbols.c ++++ crash-8.0.2/symbols.c +@@ -10756,6 +10756,21 @@ dump_offset_table(char *spec, ulong make + OFFSET(sbq_wait_state_wait_cnt)); + fprintf(fp, " sbq_wait_state_wait: %ld\n", + OFFSET(sbq_wait_state_wait)); ++ fprintf(fp, " mm_struct_mm_mt: %ld\n", OFFSET(mm_struct_mm_mt)); ++ fprintf(fp, " maple_tree_ma_root: %ld\n", OFFSET(maple_tree_ma_root)); ++ fprintf(fp, " maple_tree_ma_flags: %ld\n", OFFSET(maple_tree_ma_flags)); ++ fprintf(fp, " maple_node_parent: %ld\n", OFFSET(maple_node_parent)); ++ fprintf(fp, " maple_node_ma64: %ld\n", OFFSET(maple_node_ma64)); ++ fprintf(fp, " maple_node_mr64: %ld\n", OFFSET(maple_node_mr64)); ++ fprintf(fp, " maple_node_slot: %ld\n", OFFSET(maple_node_slot)); ++ fprintf(fp, " maple_arange_64_pivot: %ld\n", OFFSET(maple_arange_64_pivot)); ++ fprintf(fp, " maple_arange_64_slot: %ld\n", OFFSET(maple_arange_64_slot)); ++ fprintf(fp, " maple_arange_64_gap: %ld\n", OFFSET(maple_arange_64_gap)); ++ fprintf(fp, " maple_arange_64_meta: %ld\n", OFFSET(maple_arange_64_meta)); ++ fprintf(fp, " maple_range_64_pivot: %ld\n", OFFSET(maple_range_64_pivot)); ++ fprintf(fp, " maple_range_64_slot: %ld\n", OFFSET(maple_range_64_slot)); ++ fprintf(fp, " maple_metadata_end: %ld\n", OFFSET(maple_metadata_end)); ++ fprintf(fp, " maple_metadata_gap: %ld\n", OFFSET(maple_metadata_gap)); + + fprintf(fp, "\n size_table:\n"); + fprintf(fp, " page: %ld\n", SIZE(page)); +@@ -11028,6 +11043,8 @@ dump_offset_table(char *spec, ulong make + fprintf(fp, " sbitmap_queue: %ld\n", SIZE(sbitmap_queue)); + fprintf(fp, " sbq_wait_state: %ld\n", SIZE(sbq_wait_state)); + fprintf(fp, " blk_mq_tags: %ld\n", SIZE(blk_mq_tags)); ++ fprintf(fp, " maple_tree: %ld\n", SIZE(maple_tree)); ++ fprintf(fp, " maple_node: %ld\n", SIZE(maple_node)); + + fprintf(fp, " percpu_counter: %ld\n", SIZE(percpu_counter)); + diff -Nru crash-8.0.2/debian/patches/lp2038249-0012-Fix-kmem-n-option-to-display-memory-blocks-on-Linux-.patch crash-8.0.2/debian/patches/lp2038249-0012-Fix-kmem-n-option-to-display-memory-blocks-on-Linux-.patch --- crash-8.0.2/debian/patches/lp2038249-0012-Fix-kmem-n-option-to-display-memory-blocks-on-Linux-.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0012-Fix-kmem-n-option-to-display-memory-blocks-on-Linux-.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,147 @@ +From: Kazuhito Hagio +Date: Fri Mar 10 02:38:26 2023 +0000 +Subject: Fix "kmem -n" option to display memory blocks on Linux 6.3-rc1 and later + +Kernel commit d2bf38c088e0 ("driver core: remove private pointer from +struct bus_type") removed the bus_type.p member, and the "kmem -n" +option fails with the following error before displaying memory block +information on Linux 6.3-rc1 and later kernels. + + kmem: invalid structure member offset: bus_type_p + FILE: memory.c LINE: 17852 FUNCTION: init_memory_block() + +Search bus_kset.list instead for subsys_private of memory subsys. 
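+
+The recovery step is container_of()-style arithmetic: the kobject found
+on bus_kset.list is embedded as subsys_private.subsys.kobj, so the
+enclosing subsys_private is reached by subtracting the member offsets
+(sketch; kobj_addr names the kobject address found on the list):
+
+    private = kobj_addr - OFFSET(kset_kobj) - OFFSET(subsys_private_subsys);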
+ +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: backport, https://github.com/crash-utility/crash/commit/489093c2183f4f0365d8957e7275cd88225942ce +[chengen - modify defs.h context] + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -2198,6 +2198,8 @@ struct offset_table { + long maple_range_64_slot; + long maple_metadata_end; + long maple_metadata_gap; ++ long kset_kobj; ++ long subsys_private_subsys; + }; + + struct size_table { /* stash of commonly-used sizes */ +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -17771,6 +17771,13 @@ static void + init_memory_block_offset(void) + { + MEMBER_OFFSET_INIT(bus_type_p, "bus_type", "p"); ++ if (INVALID_MEMBER(bus_type_p)) { ++ MEMBER_OFFSET_INIT(kset_list, "kset", "list"); ++ MEMBER_OFFSET_INIT(kset_kobj, "kset", "kobj"); ++ MEMBER_OFFSET_INIT(kobject_name, "kobject", "name"); ++ MEMBER_OFFSET_INIT(kobject_entry, "kobject", "entry"); ++ MEMBER_OFFSET_INIT(subsys_private_subsys, "subsys_private", "subsys"); ++ } + MEMBER_OFFSET_INIT(subsys_private_klist_devices, + "subsys_private", "klist_devices"); + MEMBER_OFFSET_INIT(klist_k_list, "klist", "k_list"); +@@ -17791,15 +17798,60 @@ init_memory_block_offset(void) + } + + static void +-init_memory_block(struct list_data *ld, int *klistcnt, ulong **klistbuf) ++init_memory_block(int *klistcnt, ulong **klistbuf) + { +- ulong memory_subsys = symbol_value("memory_subsys"); + ulong private, klist, start; ++ struct list_data list_data, *ld; ++ ++ ld = &list_data; ++ private = 0; + + init_memory_block_offset(); + +- readmem(memory_subsys + OFFSET(bus_type_p), KVADDR, &private, +- sizeof(void *), "memory_subsys.private", FAULT_ON_ERROR); ++ /* ++ * v6.3-rc1 ++ * d2bf38c088e0 driver core: remove private pointer from struct bus_type ++ */ ++ if (INVALID_MEMBER(bus_type_p)) { ++ int i, cnt; ++ char buf[32]; ++ ulong bus_kset, list, name; ++ ++ BZERO(ld, sizeof(struct list_data)); ++ ++ get_symbol_data("bus_kset", sizeof(ulong), &bus_kset); ++ readmem(bus_kset + OFFSET(kset_list), KVADDR, &list, ++ sizeof(ulong), "bus_kset.list", FAULT_ON_ERROR); ++ ++ ld->flags |= LIST_ALLOCATE; ++ ld->start = list; ++ ld->end = bus_kset + OFFSET(kset_list); ++ ld->list_head_offset = OFFSET(kobject_entry); ++ ++ cnt = do_list(ld); ++ for (i = 0; i < cnt; i++) { ++ readmem(ld->list_ptr[i] + OFFSET(kobject_name), KVADDR, &name, ++ sizeof(ulong), "kobject.name", FAULT_ON_ERROR); ++ read_string(name, buf, sizeof(buf)-1); ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "kobject: %lx name: %s\n", ld->list_ptr[i], buf); ++ if (STREQ(buf, "memory")) { ++ /* entry is subsys_private.subsys.kobj. See bus_to_subsys(). 
*/ ++ private = ld->list_ptr[i] - OFFSET(kset_kobj) ++ - OFFSET(subsys_private_subsys); ++ break; ++ } ++ } ++ FREEBUF(ld->list_ptr); ++ } else { ++ ulong memory_subsys = symbol_value("memory_subsys"); ++ readmem(memory_subsys + OFFSET(bus_type_p), KVADDR, &private, ++ sizeof(void *), "memory_subsys.private", FAULT_ON_ERROR); ++ } ++ ++ if (!private) ++ error(FATAL, "cannot determine subsys_private for memory.\n"); ++ + klist = private + OFFSET(subsys_private_klist_devices) + + OFFSET(klist_k_list); + BZERO(ld, sizeof(struct list_data)); +@@ -17824,7 +17876,6 @@ dump_memory_blocks(int initialize) + ulong memory_block, device; + ulong *klistbuf; + int klistcnt, i; +- struct list_data list_data; + char mb_hdr[BUFSIZE]; + char paddr_hdr[BUFSIZE]; + char buf1[BUFSIZE]; +@@ -17841,7 +17892,7 @@ dump_memory_blocks(int initialize) + if (initialize) + return; + +- init_memory_block(&list_data, &klistcnt, &klistbuf); ++ init_memory_block(&klistcnt, &klistbuf); + + if ((symbol_exists("memory_block_size_probed")) || + (MEMBER_EXISTS("memory_block", "end_section_nr"))) +--- crash-8.0.2.orig/symbols.c ++++ crash-8.0.2/symbols.c +@@ -10384,6 +10384,7 @@ dump_offset_table(char *spec, ulong make + OFFSET(kobject_entry)); + fprintf(fp, " kset_list: %ld\n", + OFFSET(kset_list)); ++ fprintf(fp, " kset_kobj: %ld\n", OFFSET(kset_kobj)); + fprintf(fp, " request_list_count: %ld\n", + OFFSET(request_list_count)); + fprintf(fp, " request_cmd_flags: %ld\n", +@@ -10421,6 +10422,7 @@ dump_offset_table(char *spec, ulong make + fprintf(fp, " blk_mq_tags_rqs: %ld\n", + OFFSET(blk_mq_tags_rqs)); + ++ fprintf(fp, " subsys_private_subsys: %ld\n", OFFSET(subsys_private_subsys)); + fprintf(fp, " subsys_private_klist_devices: %ld\n", + OFFSET(subsys_private_klist_devices)); + fprintf(fp, " subsystem_kset: %ld\n", diff -Nru crash-8.0.2/debian/patches/lp2038249-0013-Fix-failure-of-dev-d-D-options-on-Linux-6.4-and-late.patch crash-8.0.2/debian/patches/lp2038249-0013-Fix-failure-of-dev-d-D-options-on-Linux-6.4-and-late.patch --- crash-8.0.2/debian/patches/lp2038249-0013-Fix-failure-of-dev-d-D-options-on-Linux-6.4-and-late.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0013-Fix-failure-of-dev-d-D-options-on-Linux-6.4-and-late.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,162 @@ +From: Kazuhito Hagio +Date: Tue May 16 08:59:50 2023 +0900 +Subject: Fix failure of "dev -d|-D" options on Linux 6.4 and later kernels + +Kernel commit 2df418cf4b72 ("driver core: class: remove subsystem +private pointer from struct class"), which is contained in Linux 6.4 and +later kernels, removed the class.p member for struct subsys_private. As +a result, the "dev -d|-D" options fail with the following error. + + dev: invalid structure member offset: class_p + FILE: dev.c LINE: 4689 FUNCTION: init_iter() + +Search the class_kset list for the subsys_private of block class to fix +this. + +As a preparation, introduce get_subsys_private() function, which is +abstracted from the same search procedure in init_memory_block(). 
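+
+With the helper in place, both lookups reduce to calls such as
+(illustrative usage):
+
+    private = get_subsys_private("bus_kset", "memory");   /* kmem -n   */
+    private = get_subsys_private("class_kset", "block");  /* dev -d|-D */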
+ +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/58c1816521c2e6bece3d69256b1866c9df8d93aa + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -5328,6 +5328,7 @@ struct rb_node *rb_left(struct rb_node * + struct rb_node *rb_next(struct rb_node *); + struct rb_node *rb_last(struct rb_root *); + long percpu_counter_sum_positive(ulong fbc); ++ulong get_subsys_private(char *, char *); + + /* + * symbols.c +--- crash-8.0.2.orig/dev.c ++++ crash-8.0.2/dev.c +@@ -4686,9 +4686,16 @@ init_iter(struct iter *i) + } else { + /* kernel version > 2.6.27, klist */ + unsigned long class_private_addr; +- readmem(block_class_addr + OFFSET(class_p), KVADDR, +- &class_private_addr, sizeof(class_private_addr), +- "class.p", FAULT_ON_ERROR); ++ ++ if (INVALID_MEMBER(class_p)) /* kernel version >= 6.4 */ ++ class_private_addr = get_subsys_private("class_kset", "block"); ++ else ++ readmem(block_class_addr + OFFSET(class_p), KVADDR, ++ &class_private_addr, sizeof(class_private_addr), ++ "class.p", FAULT_ON_ERROR); ++ ++ if (!class_private_addr) ++ error(FATAL, "cannot determine subsys_private for block.\n"); + + if (VALID_STRUCT(class_private)) { + /* 2.6.27 < kernel version <= 2.6.37-rc2 */ +@@ -4823,6 +4830,13 @@ void diskio_init(void) + if (INVALID_MEMBER(class_devices)) + MEMBER_OFFSET_INIT(class_devices, "class", "devices"); + MEMBER_OFFSET_INIT(class_p, "class", "p"); ++ if (INVALID_MEMBER(class_p)) { ++ MEMBER_OFFSET_INIT(kset_list, "kset", "list"); ++ MEMBER_OFFSET_INIT(kset_kobj, "kset", "kobj"); ++ MEMBER_OFFSET_INIT(kobject_name, "kobject", "name"); ++ MEMBER_OFFSET_INIT(kobject_entry, "kobject", "entry"); ++ MEMBER_OFFSET_INIT(subsys_private_subsys, "subsys_private", "subsys"); ++ } + MEMBER_OFFSET_INIT(class_private_devices, "class_private", + "class_devices"); + MEMBER_OFFSET_INIT(device_knode_class, "device", "knode_class"); +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -17812,38 +17812,9 @@ init_memory_block(int *klistcnt, ulong * + * v6.3-rc1 + * d2bf38c088e0 driver core: remove private pointer from struct bus_type + */ +- if (INVALID_MEMBER(bus_type_p)) { +- int i, cnt; +- char buf[32]; +- ulong bus_kset, list, name; +- +- BZERO(ld, sizeof(struct list_data)); +- +- get_symbol_data("bus_kset", sizeof(ulong), &bus_kset); +- readmem(bus_kset + OFFSET(kset_list), KVADDR, &list, +- sizeof(ulong), "bus_kset.list", FAULT_ON_ERROR); +- +- ld->flags |= LIST_ALLOCATE; +- ld->start = list; +- ld->end = bus_kset + OFFSET(kset_list); +- ld->list_head_offset = OFFSET(kobject_entry); +- +- cnt = do_list(ld); +- for (i = 0; i < cnt; i++) { +- readmem(ld->list_ptr[i] + OFFSET(kobject_name), KVADDR, &name, +- sizeof(ulong), "kobject.name", FAULT_ON_ERROR); +- read_string(name, buf, sizeof(buf)-1); +- if (CRASHDEBUG(1)) +- fprintf(fp, "kobject: %lx name: %s\n", ld->list_ptr[i], buf); +- if (STREQ(buf, "memory")) { +- /* entry is subsys_private.subsys.kobj. See bus_to_subsys(). 
*/ +- private = ld->list_ptr[i] - OFFSET(kset_kobj) +- - OFFSET(subsys_private_subsys); +- break; +- } +- } +- FREEBUF(ld->list_ptr); +- } else { ++ if (INVALID_MEMBER(bus_type_p)) ++ private = get_subsys_private("bus_kset", "memory"); ++ else { + ulong memory_subsys = symbol_value("memory_subsys"); + readmem(memory_subsys + OFFSET(bus_type_p), KVADDR, &private, + sizeof(void *), "memory_subsys.private", FAULT_ON_ERROR); +--- crash-8.0.2.orig/tools.c ++++ crash-8.0.2/tools.c +@@ -6930,3 +6930,46 @@ percpu_counter_sum_positive(ulong fbc) + + return (ret < 0) ? 0 : ret; + } ++ ++ulong ++get_subsys_private(char *kset_name, char *target_name) ++{ ++ ulong kset_addr, kset_list, name_addr, private = 0; ++ struct list_data list_data, *ld; ++ char buf[32]; ++ int i, cnt; ++ ++ if (!symbol_exists(kset_name)) ++ return 0; ++ ++ ld = &list_data; ++ BZERO(ld, sizeof(struct list_data)); ++ ++ get_symbol_data(kset_name, sizeof(ulong), &kset_addr); ++ readmem(kset_addr + OFFSET(kset_list), KVADDR, &kset_list, ++ sizeof(ulong), "kset.list", FAULT_ON_ERROR); ++ ++ ld->flags |= LIST_ALLOCATE; ++ ld->start = kset_list; ++ ld->end = kset_addr + OFFSET(kset_list); ++ ld->list_head_offset = OFFSET(kobject_entry); ++ ++ cnt = do_list(ld); ++ ++ for (i = 0; i < cnt; i++) { ++ readmem(ld->list_ptr[i] + OFFSET(kobject_name), KVADDR, &name_addr, ++ sizeof(ulong), "kobject.name", FAULT_ON_ERROR); ++ read_string(name_addr, buf, sizeof(buf)-1); ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "kobject: %lx name: %s\n", ld->list_ptr[i], buf); ++ if (STREQ(buf, target_name)) { ++ /* entry is subsys_private.subsys.kobj. See bus_to_subsys(). */ ++ private = ld->list_ptr[i] - OFFSET(kset_kobj) ++ - OFFSET(subsys_private_subsys); ++ break; ++ } ++ } ++ FREEBUF(ld->list_ptr); ++ ++ return private; ++} diff -Nru crash-8.0.2/debian/patches/lp2038249-0014-Fix-kmem-v-option-displaying-no-regions-on-Linux-6.3.patch crash-8.0.2/debian/patches/lp2038249-0014-Fix-kmem-v-option-displaying-no-regions-on-Linux-6.3.patch --- crash-8.0.2/debian/patches/lp2038249-0014-Fix-kmem-v-option-displaying-no-regions-on-Linux-6.3.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0014-Fix-kmem-v-option-displaying-no-regions-on-Linux-6.3.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,72 @@ +From: Kazuhito Hagio +Date: Thu May 18 11:48:28 2023 +0900 +Subject: Fix "kmem -v" option displaying no regions on Linux 6.3 and later + +Kernel commit 869176a09606 ("mm/vmalloc.c: add flags to mark vm_map_ram +area"), which is contained in Linux 6.3 and later, added "flags" member +to struct vmap_area. This was the revival of the "flags" member as +kernel commit 688fcbfc06e4 had eliminated it before. + +As a result, crash started to use the old procedure using the member and +displays no vmalloc'd regions, because it does not have the same flag +value as the old one. + + crash> kmem -v + VMAP_AREA VM_STRUCT ADDRESS RANGE SIZE + crash> + +To fix this, also check if vmap_area.purge_list exists, which was +introduced with the flags and removed later, to determine that the flags +member is the old one. 
+ +Related vmap_area history: + v2.6.28 db64fe02258f introduced vmap_area with flags and purge_list + v5.4 688fcbfc06e4 removed flags + v5.11 96e2db456135 removed purge_list + v6.3 869176a09606 added flags again + +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/342cf340ed0386880fe2a3115d6bef32eabb511b + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -2200,6 +2200,7 @@ struct offset_table { + long maple_metadata_gap; + long kset_kobj; + long subsys_private_subsys; ++ long vmap_area_purge_list; + }; + + struct size_table { /* stash of commonly-used sizes */ +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -428,6 +428,7 @@ vm_init(void) + MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "vm"); + if (INVALID_MEMBER(vmap_area_vm)) + MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "private"); ++ MEMBER_OFFSET_INIT(vmap_area_purge_list, "vmap_area", "purge_list"); + STRUCT_SIZE_INIT(vmap_area, "vmap_area"); + if (VALID_MEMBER(vmap_area_va_start) && + VALID_MEMBER(vmap_area_va_end) && +@@ -9053,7 +9054,8 @@ dump_vmap_area(struct meminfo *vi) + readmem(ld->list_ptr[i], KVADDR, vmap_area_buf, + SIZE(vmap_area), "vmap_area struct", FAULT_ON_ERROR); + +- if (VALID_MEMBER(vmap_area_flags)) { ++ if (VALID_MEMBER(vmap_area_flags) && ++ VALID_MEMBER(vmap_area_purge_list)) { + flags = ULONG(vmap_area_buf + OFFSET(vmap_area_flags)); + if (flags != VM_VM_AREA) + continue; +--- crash-8.0.2.orig/symbols.c ++++ crash-8.0.2/symbols.c +@@ -9158,6 +9158,7 @@ dump_offset_table(char *spec, ulong make + OFFSET(vmap_area_vm)); + fprintf(fp, " vmap_area_flags: %ld\n", + OFFSET(vmap_area_flags)); ++ fprintf(fp, " vmap_area_purge_list: %ld\n", OFFSET(vmap_area_purge_list)); + + fprintf(fp, " module_size_of_struct: %ld\n", + OFFSET(module_size_of_struct)); diff -Nru crash-8.0.2/debian/patches/lp2038249-0015-x86_64-Fix-bt-command-printing-stale-entries-on-Linu.patch crash-8.0.2/debian/patches/lp2038249-0015-x86_64-Fix-bt-command-printing-stale-entries-on-Linu.patch --- crash-8.0.2/debian/patches/lp2038249-0015-x86_64-Fix-bt-command-printing-stale-entries-on-Linu.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0015-x86_64-Fix-bt-command-printing-stale-entries-on-Linu.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,336 @@ +From: Kazuhito Hagio +Date: Thu May 18 16:53:54 2023 +0900 +Subject: x86_64: Fix "bt" command printing stale entries on Linux 6.4 and later + +Kernel commit fb799447ae29 ("x86,objtool: Split UNWIND_HINT_EMPTY in +two"), which is contained in Linux 6.4 and later kernels, changed +ORC_TYPE_CALL macro from 0 to 2. As a result, the "bt" command cannot +use ORC entries, and can display stale entries in a call trace. 
crash> bt 1
+ PID: 1      TASK: ffff93cd06294180  CPU: 51   COMMAND: "systemd"
+  #0 [ffffb72bc00cbc98] __schedule at ffffffff86e52aae
+  #1 [ffffb72bc00cbd00] schedule at ffffffff86e52f6a
+  #2 [ffffb72bc00cbd18] schedule_hrtimeout_range_clock at ffffffff86e58ef5
+  #3 [ffffb72bc00cbd88] ep_poll at ffffffff8669624d
+  #4 [ffffb72bc00cbe28] do_epoll_wait at ffffffff86696371
+  #5 [ffffb72bc00cbe30] do_timerfd_settime at ffffffff8669902b  <<
+  #6 [ffffb72bc00cbe60] __x64_sys_epoll_wait at ffffffff86696bf0
+  #7 [ffffb72bc00cbeb0] do_syscall_64 at ffffffff86e3feb9
+  #8 [ffffb72bc00cbee0] __task_pid_nr_ns at ffffffff863330d7  <<
+  #9 [ffffb72bc00cbf08] syscall_exit_to_user_mode at ffffffff86e466b2  << stale entries
+ #10 [ffffb72bc00cbf18] do_syscall_64 at ffffffff86e3fec9  <<
+ #11 [ffffb72bc00cbf50] entry_SYSCALL_64_after_hwframe at ffffffff870000aa
+
+Also, kernel commit ffb1b4a41016 added a member to struct orc_entry.
+Although this does not affect crash's unwinder, its debugging
+information can be displayed incorrectly.
+
+To fix these,
+(1) introduce a "kernel_orc_entry_6_4" structure corresponding to 6.4 and
+    an "orc_entry" abstraction layer structure in crash,
+(2) switch ORC_TYPE_CALL to 2 or 0 according to the kernel's orc_entry
+    structure.
+
+Related orc_entry history:
+  v4.14 39358a033b2e introduced struct orc_entry
+  v4.19 d31a580266ee added orc_entry.end member
+  v6.3  ffb1b4a41016 added orc_entry.signal member
+  v6.4  fb799447ae29 removed end member and changed type member to 3 bits
+
+Signed-off-by: Kazuhito Hagio
+
+Bug-Ubuntu: https://launchpad.net/bugs/2038249
+Origin: backport, https://github.com/crash-utility/crash/commit/77d8621876c1c6a3a25b91e464ba588a542485fb
+[chengen - modify x86_64.c context]
+
+--- crash-8.0.2.orig/defs.h
++++ crash-8.0.2/defs.h
+@@ -6159,9 +6159,29 @@ typedef struct __attribute__((__packed__
+ 	signed short sp_offset;
+ 	signed short bp_offset;
+ 	unsigned int sp_reg:4;
+ 	unsigned int bp_reg:4;
+ 	unsigned int type:2;
++	unsigned int signal:1;
+ 	unsigned int end:1;
+ } kernel_orc_entry;
+ 
++typedef struct __attribute__((__packed__)) {
++	signed short sp_offset;
++	signed short bp_offset;
++	unsigned int sp_reg:4;
++	unsigned int bp_reg:4;
++	unsigned int type:3;
++	unsigned int signal:1;
++} kernel_orc_entry_6_4;
++
++typedef struct orc_entry {
++	signed short sp_offset;
++	signed short bp_offset;
++	unsigned int sp_reg;
++	unsigned int bp_reg;
++	unsigned int type;
++	unsigned int signal;
++	unsigned int end;
++} orc_entry;
++
+ struct ORC_data {
+ 	int module_ORC;
+ 	uint lookup_num_blocks;
+@@ -6172,10 +6192,13 @@ struct ORC_data {
+ 	ulong orc_lookup;
+ 	ulong ip_entry;
+ 	ulong orc_entry;
+-	kernel_orc_entry kernel_orc_entry;
++	orc_entry orc_entry_data;
++	int has_signal;
++	int has_end;
+ };
+ 
+-#define ORC_TYPE_CALL 0
++#define ORC_TYPE_CALL ((machdep->flags & ORC_6_4) ? 2 : 0)
++/* The below entries are not used and must be updated if we use them.
*/ + #define ORC_TYPE_REGS 1 + #define ORC_TYPE_REGS_IRET 2 + #define UNWIND_HINT_TYPE_SAVE 3 +@@ -6252,6 +6275,7 @@ struct machine_specific { + #define ORC (0x4000) + #define KPTI (0x8000) + #define L1TF (0x10000) ++#define ORC_6_4 (0x20000) + + #define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4|VM_5LEVEL) + +--- crash-8.0.2.orig/x86_64.c ++++ crash-8.0.2/x86_64.c +@@ -132,9 +132,9 @@ static void GART_init(void); + static void x86_64_exception_stacks_init(void); + static int in_START_KERNEL_map(ulong); + static ulong orc_ip(ulong); +-static kernel_orc_entry *__orc_find(ulong, ulong, uint, ulong); +-static kernel_orc_entry *orc_find(ulong); +-static kernel_orc_entry *orc_module_find(ulong); ++static orc_entry *__orc_find(ulong, ulong, uint, ulong); ++static orc_entry *orc_find(ulong); ++static orc_entry *orc_module_find(ulong); + static ulong ip_table_to_vaddr(ulong); + static void orc_dump(ulong); + +@@ -806,6 +806,8 @@ x86_64_dump_machdep_table(ulong arg) + fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : ""); + if (machdep->flags & ORC) + fprintf(fp, "%sORC", others++ ? "|" : ""); ++ if (machdep->flags & ORC_6_4) ++ fprintf(fp, "%sORC_6_4", others++ ? "|" : ""); + if (machdep->flags & FRAMEPOINTER) + fprintf(fp, "%sFRAMEPOINTER", others++ ? "|" : ""); + if (machdep->flags & GART_REGION) +@@ -980,6 +982,8 @@ x86_64_dump_machdep_table(ulong arg) + fprintf(fp, " ORC_data: %s", machdep->flags & ORC ? "\n" : "(unused)\n"); + if (machdep->flags & ORC) { + fprintf(fp, " module_ORC: %s\n", ms->orc.module_ORC ? "TRUE" : "FALSE"); ++ fprintf(fp, " has_signal: %s\n", ms->orc.has_signal ? "TRUE" : "FALSE"); ++ fprintf(fp, " has_end: %s\n", ms->orc.has_end ? "TRUE" : "FALSE"); + fprintf(fp, " lookup_num_blocks: %d\n", ms->orc.lookup_num_blocks); + fprintf(fp, " __start_orc_unwind_ip: %lx\n", ms->orc.__start_orc_unwind_ip); + fprintf(fp, " __stop_orc_unwind_ip: %lx\n", ms->orc.__stop_orc_unwind_ip); +@@ -988,14 +992,18 @@ x86_64_dump_machdep_table(ulong arg) + fprintf(fp, " orc_lookup: %lx\n", ms->orc.orc_lookup); + fprintf(fp, " ip_entry: %lx\n", ms->orc.ip_entry); + fprintf(fp, " orc_entry: %lx\n", ms->orc.orc_entry); +- fprintf(fp, " kernel_orc_entry:\n"); +- fprintf(fp, " sp_offset: %d\n", ms->orc.kernel_orc_entry.sp_offset); +- fprintf(fp, " bp_offset: %d\n", ms->orc.kernel_orc_entry.bp_offset); +- fprintf(fp, " sp_reg: %d\n", ms->orc.kernel_orc_entry.sp_reg); +- fprintf(fp, " bp_reg: %d\n", ms->orc.kernel_orc_entry.bp_reg); +- fprintf(fp, " type: %d\n", ms->orc.kernel_orc_entry.type); +- if (MEMBER_EXISTS("orc_entry", "end")) +- fprintf(fp, " end: %d\n", ms->orc.kernel_orc_entry.end); ++ fprintf(fp, " orc_entry_data:\n"); ++ fprintf(fp, " sp_offset: %d\n", ms->orc.orc_entry_data.sp_offset); ++ fprintf(fp, " bp_offset: %d\n", ms->orc.orc_entry_data.bp_offset); ++ fprintf(fp, " sp_reg: %d\n", ms->orc.orc_entry_data.sp_reg); ++ fprintf(fp, " bp_reg: %d\n", ms->orc.orc_entry_data.bp_reg); ++ fprintf(fp, " type: %d\n", ms->orc.orc_entry_data.type); ++ if (ms->orc.has_signal) ++ fprintf(fp, " signal: %d\n", ms->orc.orc_entry_data.signal); ++ else ++ fprintf(fp, " signal: (n/a)\n"); ++ if (ms->orc.has_end) ++ fprintf(fp, " end: %d\n", ms->orc.orc_entry_data.end); + else + fprintf(fp, " end: (n/a)\n"); + } +@@ -6415,6 +6423,12 @@ x86_64_ORC_init(void) + orc->__stop_orc_unwind = symbol_value("__stop_orc_unwind"); + orc->orc_lookup = symbol_value("orc_lookup"); + ++ orc->has_signal = MEMBER_EXISTS("orc_entry", "signal"); /* added at 6.3 */ ++ orc->has_end = MEMBER_EXISTS("orc_entry", "end"); /* 
removed at 6.4 */ ++ ++ if (orc->has_signal && !orc->has_end) ++ machdep->flags |= ORC_6_4; ++ + machdep->flags |= ORC; + } + +@@ -8489,7 +8503,7 @@ x86_64_get_framesize(struct bt_info *bt, + int reterror; + int arg_exists; + int exception; +- kernel_orc_entry *korc; ++ orc_entry *korc; + + if (!(bt->flags & BT_FRAMESIZE_DEBUG)) { + if ((bt->flags & BT_FRAMESIZE_IGNORE_MASK) || +@@ -8575,12 +8589,15 @@ x86_64_get_framesize(struct bt_info *bt, + + if ((machdep->flags & ORC) && (korc = orc_find(textaddr))) { + if (CRASHDEBUG(1)) { ++ struct ORC_data *orc = &machdep->machspec->orc; + fprintf(fp, + "rsp: %lx textaddr: %lx framesize: %d -> spo: %d bpo: %d spr: %d bpr: %d type: %d %s", + rsp, textaddr, framesize, korc->sp_offset, korc->bp_offset, + korc->sp_reg, korc->bp_reg, korc->type, + (korc->type == ORC_TYPE_CALL) && (korc->sp_reg == ORC_REG_SP) ? "" : "(UNUSED)"); +- if (MEMBER_EXISTS("orc_entry", "end")) ++ if (orc->has_signal) ++ fprintf(fp, " signal: %d", korc->signal); ++ if (orc->has_end) + fprintf(fp, " end: %d", korc->end); + fprintf(fp, "\n"); + } +@@ -9055,7 +9072,53 @@ orc_ip(ulong ip) + return (ip + ip_entry); + } + +-static kernel_orc_entry * ++static orc_entry * ++orc_get_entry(struct ORC_data *orc) ++{ ++ struct orc_entry *entry = &orc->orc_entry_data; ++ ++ if (machdep->flags & ORC_6_4) { ++ kernel_orc_entry_6_4 korc; ++ ++ if (!readmem(orc->orc_entry, KVADDR, &korc, sizeof(kernel_orc_entry_6_4), ++ "kernel orc_entry", RETURN_ON_ERROR|QUIET)) ++ return NULL; ++ ++ entry->sp_offset = korc.sp_offset; ++ entry->bp_offset = korc.bp_offset; ++ entry->sp_reg = korc.sp_reg; ++ entry->bp_reg = korc.bp_reg; ++ entry->type = korc.type; ++ entry->signal = korc.signal; ++ } else { ++ kernel_orc_entry korc; ++ ++ if (!readmem(orc->orc_entry, KVADDR, &korc, sizeof(kernel_orc_entry), ++ "kernel orc_entry", RETURN_ON_ERROR|QUIET)) ++ return NULL; ++ ++ entry->sp_offset = korc.sp_offset; ++ entry->bp_offset = korc.bp_offset; ++ entry->sp_reg = korc.sp_reg; ++ entry->bp_reg = korc.bp_reg; ++ entry->type = korc.type; ++ if (orc->has_end) { ++ /* ++ * orc_entry.signal was inserted before orc_entry.end. ++ * see ffb1b4a41016. 
++ */ ++ if (orc->has_signal) { ++ entry->signal = korc.signal; ++ entry->end = korc.end; ++ } else ++ entry->end = korc.signal; /* on purpose */ ++ } ++ } ++ ++ return entry; ++} ++ ++static orc_entry * + __orc_find(ulong ip_table_ptr, ulong u_table_ptr, uint num_entries, ulong ip) + { + int index; +@@ -9065,7 +9128,7 @@ __orc_find(ulong ip_table_ptr, ulong u_t + int *ip_table = (int *)ip_table_ptr; + struct ORC_data *orc = &machdep->machspec->orc; + ulong vaddr; +- kernel_orc_entry *korc; ++ orc_entry *korc; + + if (CRASHDEBUG(2)) { + int i, ip_entry; +@@ -9109,18 +9172,20 @@ __orc_find(ulong ip_table_ptr, ulong u_t + + orc->ip_entry = (ulong)found; + orc->orc_entry = u_table_ptr + (index * SIZE(orc_entry)); +- if (!readmem(orc->orc_entry, KVADDR, &orc->kernel_orc_entry, +- sizeof(kernel_orc_entry), "kernel orc_entry", RETURN_ON_ERROR|QUIET)) ++ ++ if (!orc_get_entry(orc)) + return NULL; + +- korc = &orc->kernel_orc_entry; ++ korc = &orc->orc_entry_data; + + if (CRASHDEBUG(2)) { + fprintf(fp, " found: %lx index: %d\n", (ulong)found, index); + fprintf(fp, + " orc_entry: %lx sp_offset: %d bp_offset: %d sp_reg: %d bp_reg: %d type: %d", + orc->orc_entry, korc->sp_offset, korc->bp_offset, korc->sp_reg, korc->bp_reg, korc->type); +- if (MEMBER_EXISTS("orc_entry", "end")) ++ if (orc->has_signal) ++ fprintf(fp, " signal: %d", korc->signal); ++ if (orc->has_end) + fprintf(fp, " end: %d", korc->end); + fprintf(fp, "\n"); + } +@@ -9133,7 +9198,7 @@ __orc_find(ulong ip_table_ptr, ulong u_t + #define LOOKUP_START_IP (unsigned long)kt->stext + #define LOOKUP_STOP_IP (unsigned long)kt->etext + +-static kernel_orc_entry * ++static orc_entry * + orc_find(ulong ip) + { + unsigned int idx, start, stop; +@@ -9203,7 +9268,7 @@ orc_find(ulong ip) + orc->__start_orc_unwind + (start * SIZE(orc_entry)), stop - start, ip); + } + +-static kernel_orc_entry * ++static orc_entry * + orc_module_find(ulong ip) + { + struct load_module *lm; +@@ -9250,7 +9315,7 @@ static void + orc_dump(ulong ip) + { + struct ORC_data *orc = &machdep->machspec->orc; +- kernel_orc_entry *korc; ++ orc_entry *korc; + ulong vaddr, offset; + struct syment *sp, *orig; + +@@ -9273,13 +9338,15 @@ next_in_func: + fprintf(fp, "%s+%ld -> ", sp->name, offset); + else + fprintf(fp, "(unresolved) -> "); +- if (!readmem(orc->orc_entry, KVADDR, &orc->kernel_orc_entry, sizeof(kernel_orc_entry), +- "kernel orc_entry", RETURN_ON_ERROR)) ++ ++ if (!orc_get_entry(orc)) + error(FATAL, "cannot read orc_entry\n"); +- korc = &orc->kernel_orc_entry; ++ korc = &orc->orc_entry_data; + fprintf(fp, "orc: %lx spo: %d bpo: %d spr: %d bpr: %d type: %d", + orc->orc_entry, korc->sp_offset, korc->bp_offset, korc->sp_reg, korc->bp_reg, korc->type); +- if (MEMBER_EXISTS("orc_entry", "end")) ++ if (orc->has_signal) ++ fprintf(fp, " signal: %d", korc->signal); ++ if (orc->has_end) + fprintf(fp, " end: %d", korc->end); + fprintf(fp, "\n"); + diff -Nru crash-8.0.2/debian/patches/lp2038249-0016-Support-module-memory-layout-change-on-Linux-6.4.patch crash-8.0.2/debian/patches/lp2038249-0016-Support-module-memory-layout-change-on-Linux-6.4.patch --- crash-8.0.2/debian/patches/lp2038249-0016-Support-module-memory-layout-change-on-Linux-6.4.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0016-Support-module-memory-layout-change-on-Linux-6.4.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,2366 @@ +From: Kazuhito Hagio +Date: Thu Jun 22 16:09:07 2023 +0900 +Subject: Support module memory layout change on Linux 6.4 + +Support module memory layout 
change on Linux 6.4 by kernel commit +ac3b43283923 ("module: replace module_layout with module_memory") [1]. +Without the patch, crash cannot even start a session with an error +message like this: + + crash: invalid structure member offset: module_core_size + FILE: kernel.c LINE: 3787 FUNCTION: module_init() + +[1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ac3b43283923 + +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: backport, https://github.com/crash-utility/crash/commit/7750e61fdb2a083f26156a5338aa2ebe26447f3f +[chengen - modify gdb-10.2.patch context] + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -669,6 +669,7 @@ struct new_utsname { + #define IRQ_DESC_TREE_RADIX (0x40ULL) + #define IRQ_DESC_TREE_XARRAY (0x80ULL) + #define KMOD_PAX (0x100ULL) ++#define KMOD_MEMORY (0x200ULL) + + #define XEN() (kt->flags & ARCH_XEN) + #define OPENVZ() (kt->flags & ARCH_OPENVZ) +@@ -676,6 +677,7 @@ struct new_utsname { + #define PVOPS_XEN() (kt->flags & ARCH_PVOPS_XEN) + + #define PAX_MODULE_SPLIT() (kt->flags2 & KMOD_PAX) ++#define MODULE_MEMORY() (kt->flags2 & KMOD_MEMORY) + + #define XEN_MACHINE_TO_MFN(m) ((ulonglong)(m) >> PAGESHIFT()) + #define XEN_PFN_TO_PSEUDO(p) ((ulonglong)(p) << PAGESHIFT()) +@@ -2201,6 +2203,9 @@ struct offset_table { + long kset_kobj; + long subsys_private_subsys; + long vmap_area_purge_list; ++ long module_mem; ++ long module_memory_base; ++ long module_memory_size; + }; + + struct size_table { /* stash of commonly-used sizes */ +@@ -2374,6 +2379,7 @@ struct size_table { /* stash of + long percpu_counter; + long maple_tree; + long maple_node; ++ long module_memory; + }; + + struct array_table { +@@ -2902,6 +2908,23 @@ struct mod_section_data { + ulong size; + int priority; + int flags; ++ ulong addr; ++}; ++ ++/* Emulate enum mod_mem_type in include/linux/module.h */ ++#define MOD_TEXT (0) ++#define MOD_DATA (1) ++#define MOD_RODATA (2) ++#define MOD_RO_AFTER_INIT (3) ++#define MOD_INIT_TEXT (4) ++#define MOD_INIT_DATA (5) ++#define MOD_INIT_RODATA (6) ++#define MOD_MEM_NUM_TYPES (7) ++#define MOD_INVALID (-1) ++ ++struct module_memory { ++ ulong base; ++ uint size; + }; + + struct load_module { +@@ -2937,19 +2960,29 @@ struct load_module { + ulong mod_percpu; + ulong mod_percpu_size; + struct objfile *loaded_objfile; +-}; + +-#define IN_MODULE(A,L) \ +- (((ulong)(A) >= (L)->mod_base) && ((ulong)(A) < ((L)->mod_base+(L)->mod_size))) +- +-#define IN_MODULE_INIT(A,L) \ +- (((ulong)(A) >= (L)->mod_init_module_ptr) && ((ulong)(A) < ((L)->mod_init_module_ptr+(L)->mod_init_size))) ++ /* For 6.4 module_memory */ ++ struct module_memory mem[MOD_MEM_NUM_TYPES]; ++ struct syment **symtable; ++ struct syment **symend; ++ struct syment *ext_symtable[MOD_MEM_NUM_TYPES]; ++ struct syment *ext_symend[MOD_MEM_NUM_TYPES]; ++ struct syment *load_symtable[MOD_MEM_NUM_TYPES]; ++ struct syment *load_symend[MOD_MEM_NUM_TYPES]; ++}; + ++#define IN_MODULE(A,L) (in_module_range(A, L, MOD_TEXT, MOD_RO_AFTER_INIT) != MOD_INVALID) ++#define IN_MODULE_INIT(A,L) (in_module_range(A, L, MOD_INIT_TEXT, MOD_INIT_RODATA) != MOD_INVALID) ++#define IN_MODULE_TEXT(A,L) (in_module_range(A, L, MOD_TEXT, MOD_TEXT) == MOD_TEXT || \ ++ in_module_range(A, L, MOD_INIT_TEXT, MOD_INIT_TEXT) == MOD_INIT_TEXT) + #define IN_MODULE_PERCPU(A,L) \ + (((ulong)(A) >= (L)->mod_percpu) && ((ulong)(A) < ((L)->mod_percpu+(L)->mod_percpu_size))) + + #define MODULE_PERCPU_SYMS_LOADED(L) ((L)->mod_percpu && (L)->mod_percpu_size) + ++#define 
for_each_mod_mem_type(type) \ ++ for ((type) = MOD_TEXT; (type) < MOD_MEM_NUM_TYPES; (type)++) ++ + #ifndef GDB_COMMON + + #define KVADDR (0x1) +@@ -5390,6 +5423,7 @@ void dump_struct_member(char *, ulong, u + void dump_union(char *, ulong, unsigned); + void store_module_symbols_v1(ulong, int); + void store_module_symbols_v2(ulong, int); ++void store_module_symbols_6_4(ulong, int); + int is_datatype_command(void); + int is_typedef(char *); + int arg_to_datatype(char *, struct datatype_member *, ulong); +--- crash-8.0.2.orig/gdb-10.2.patch ++++ crash-8.0.2/gdb-10.2.patch +@@ -1737,3 +1737,29 @@ exit 0 + struct field *nextfield; + short nfields; + struct type *typedef_type, *target_type; ++ ++--- gdb-10.2/gdb/symtab.c.orig +++++ gdb-10.2/gdb/symtab.c ++@@ -7476,7 +7476,7 @@ gdb_add_symbol_file(struct gnu_request * ++ int i; ++ int allsect = 0; ++ char *secname; ++- char buf[80]; +++ char buf[96]; ++ ++ gdb_current_load_module = lm = (struct load_module *)req->addr; ++ ++@@ -7515,8 +7515,11 @@ gdb_add_symbol_file(struct gnu_request * ++ secname = lm->mod_section_data[i].name; ++ if ((lm->mod_section_data[i].flags & SEC_FOUND) && ++ !STREQ(secname, ".text")) { ++- sprintf(buf, " -s %s 0x%lx", secname, ++- lm->mod_section_data[i].offset + lm->mod_base); +++ if (lm->mod_section_data[i].addr) +++ sprintf(buf, " -s %s 0x%lx", secname, lm->mod_section_data[i].addr); +++ else +++ sprintf(buf, " -s %s 0x%lx", secname, +++ lm->mod_section_data[i].offset + lm->mod_base); ++ strcat(req->buf, buf); ++ } ++ } +--- crash-8.0.2.orig/kernel.c ++++ crash-8.0.2/kernel.c +@@ -3567,7 +3567,21 @@ module_init(void) + MEMBER_OFFSET_INIT(module_num_gpl_syms, "module", + "num_gpl_syms"); + +- if (MEMBER_EXISTS("module", "module_core")) { ++ if (MEMBER_EXISTS("module", "mem")) { /* 6.4 and later */ ++ kt->flags2 |= KMOD_MEMORY; /* MODULE_MEMORY() can be used. */ ++ ++ MEMBER_OFFSET_INIT(module_mem, "module", "mem"); ++ MEMBER_OFFSET_INIT(module_memory_base, "module_memory", "base"); ++ MEMBER_OFFSET_INIT(module_memory_size, "module_memory", "size"); ++ STRUCT_SIZE_INIT(module_memory, "module_memory"); ++ ++ if (CRASHDEBUG(1)) ++ error(INFO, "struct module_memory detected.\n"); ++ ++ if (get_array_length("module.mem", NULL, 0) != MOD_MEM_NUM_TYPES) ++ error(WARNING, "module memory types have changed!\n"); ++ ++ } else if (MEMBER_EXISTS("module", "module_core")) { + MEMBER_OFFSET_INIT(module_core_size, "module", + "core_size"); + MEMBER_OFFSET_INIT(module_init_size, "module", +@@ -3753,6 +3767,8 @@ module_init(void) + total += nsyms; + total += 2; /* store the module's start/ending addresses */ + total += 2; /* and the init start/ending addresses */ ++ if (MODULE_MEMORY()) /* 7 regions at most -> 14, so needs +10 */ ++ total += 10; + + /* + * If the module has kallsyms, set up to grab them as well. 
+@@ -3780,7 +3796,11 @@ module_init(void) + case KALLSYMS_V2: + if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) { + numksyms = UINT(modbuf + OFFSET(module_num_symtab)); +- size = UINT(modbuf + MODULE_OFFSET2(module_core_size, rx)); ++ if (MODULE_MEMORY()) ++ /* check mem[MOD_TEXT].size only */ ++ size = UINT(modbuf + OFFSET(module_mem) + OFFSET(module_memory_size)); ++ else ++ size = UINT(modbuf + MODULE_OFFSET2(module_core_size, rx)); + } else { + numksyms = ULONG(modbuf + OFFSET(module_num_symtab)); + size = ULONG(modbuf + MODULE_OFFSET2(module_core_size, rx)); +@@ -3818,7 +3838,10 @@ module_init(void) + store_module_symbols_v1(total, kt->mods_installed); + break; + case KMOD_V2: +- store_module_symbols_v2(total, kt->mods_installed); ++ if (MODULE_MEMORY()) ++ store_module_symbols_6_4(total, kt->mods_installed); ++ else ++ store_module_symbols_v2(total, kt->mods_installed); + break; + } + +@@ -3832,7 +3855,7 @@ module_init(void) + static int + verify_modules(void) + { +- int i; ++ int i, t; + int found, irregularities; + ulong mod, mod_next, mod_base; + long mod_size; +@@ -3889,8 +3912,13 @@ verify_modules(void) + mod_base = mod; + break; + case KMOD_V2: +- mod_base = ULONG(modbuf + +- MODULE_OFFSET2(module_module_core, rx)); ++ if (MODULE_MEMORY()) ++ /* mem[MOD_TEXT].base */ ++ mod_base = ULONG(modbuf + OFFSET(module_mem) + ++ OFFSET(module_memory_base)); ++ else ++ mod_base = ULONG(modbuf + ++ MODULE_OFFSET2(module_module_core, rx)); + break; + } + +@@ -3912,7 +3940,17 @@ verify_modules(void) + case KMOD_V2: + module_name = modbuf + + OFFSET(module_name); +- if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) ++ if (MODULE_MEMORY()) { ++ mod_size = 0; ++ for_each_mod_mem_type(t) { ++ if (t == MOD_INIT_TEXT) ++ break; ++ ++ mod_size += UINT(modbuf + OFFSET(module_mem) + ++ SIZE(module_memory) * t + ++ OFFSET(module_memory_size)); ++ } ++ } else if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) + mod_size = UINT(modbuf + + MODULE_OFFSET2(module_core_size, rx)); + else +@@ -4532,7 +4570,7 @@ do_module_cmd(ulong flag, char *modref, + "MODULE"), + mkstring(buf2, maxnamelen, LJUST, "NAME"), + mkstring(buf4, VADDR_PRLEN, CENTER|LJUST, +- "BASE"), ++ MODULE_MEMORY() ? "TEXT_BASE" : "BASE"), + mkstring(buf3, maxsizelen, RJUST, "SIZE")); + } + +@@ -6140,6 +6178,8 @@ dump_kernel_table(int verbose) + fprintf(fp, "%sIRQ_DESC_TREE_XARRAY", others++ ? "|" : ""); + if (kt->flags2 & KMOD_PAX) + fprintf(fp, "%sKMOD_PAX", others++ ? "|" : ""); ++ if (kt->flags2 & KMOD_MEMORY) ++ fprintf(fp, "%sKMOD_MEMORY", others++ ? 
"|" : ""); + fprintf(fp, ")\n"); + + fprintf(fp, " stext: %lx\n", kt->stext); +--- crash-8.0.2.orig/memory.c ++++ crash-8.0.2/memory.c +@@ -15636,10 +15636,44 @@ in_vmlist_segment(ulong vaddr) + static int + next_module_vaddr(ulong vaddr, ulong *nextvaddr) + { +- int i; +- ulong start, end; ++ int i, t; ++ ulong start, end, min = (ulong)-1; + struct load_module *lm; + ++ if (!MODULE_MEMORY()) ++ goto old_module; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->mem[t].size) ++ continue; ++ ++ start = lm->mem[t].base; ++ end = start + lm->mem[t].size; ++ ++ if (vaddr >= end) ++ continue; ++ ++ if (vaddr < start) { ++ if (start < min) /* replace candidate */ ++ min = start; ++ continue; ++ } ++ ++ *nextvaddr = vaddr; ++ return TRUE; ++ } ++ } ++ ++ if (min != (ulong)-1) { ++ *nextvaddr = min; ++ return TRUE; ++ } ++ return FALSE; ++ ++old_module: + for (i = 0; i < st->mods_installed; i++) { + lm = &st->load_modules[i]; + start = lm->mod_base; +--- crash-8.0.2.orig/symbols.c ++++ crash-8.0.2/symbols.c +@@ -48,8 +48,8 @@ static int load_module_index(struct syme + static void section_header_info(bfd *, asection *, void *); + static void store_section_data(struct load_module *, bfd *, asection *); + static void calculate_load_order_v1(struct load_module *, bfd *); +-static void calculate_load_order_v2(struct load_module *, bfd *, int, +- void *, long, unsigned int); ++static void calculate_load_order_v2(struct load_module *, bfd *, int, void *, long, unsigned int); ++static void calculate_load_order_6_4(struct load_module *, bfd *, int, void *, long, unsigned int); + static void check_insmod_builtin(struct load_module *, int, ulong *); + static int is_insmod_builtin(struct load_module *, struct syment *); + struct load_module; +@@ -104,6 +104,42 @@ static unsigned char is_right_brace(cons + static struct struct_elem *find_node(struct struct_elem *, char *); + static void dump_node(struct struct_elem *, char *, unsigned char, unsigned char); + ++static int module_mem_type(ulong, struct load_module *); ++static ulong module_mem_end(ulong, struct load_module *); ++static int in_module_range(ulong, struct load_module *, int, int); ++static struct syment *value_search_module_6_4(ulong, ulong *); ++static struct syment *next_symbol_by_symname(char *); ++static struct syment *prev_symbol_by_symname(char *); ++static struct syment *next_module_symbol_by_value(ulong); ++static struct syment *prev_module_symbol_by_value(ulong); ++static struct syment *next_module_symbol_by_syment(struct syment *); ++static struct syment *prev_module_symbol_by_syment(struct syment *); ++ ++struct module_tag { ++ char *start; ++ char *end; ++ char *start_str; ++ char *end_str; ++}; ++ ++#define MODULE_TAG(type, suffix) ("_MODULE_" #type "_" #suffix "_") ++#define MODULE_STR(type, suffix) ( "MODULE " #type " " #suffix) ++#define MODULE_TAGS(type) { \ ++ .start = MODULE_TAG(type, START), \ ++ .end = MODULE_TAG(type, END), \ ++ .start_str = MODULE_STR(type, START), \ ++ .end_str = MODULE_STR(type, END) \ ++} ++ ++static const struct module_tag module_tag[] = { ++ MODULE_TAGS(TEXT), ++ MODULE_TAGS(DATA), ++ MODULE_TAGS(RODATA), ++ MODULE_TAGS(RO_AFTER_INIT), ++ MODULE_TAGS(INIT_TEXT), ++ MODULE_TAGS(INIT_DATA), ++ MODULE_TAGS(INIT_RODATA), ++}; + + /* + * structure/union printing stuff +@@ -1268,10 +1304,7 @@ symname_hash_search(struct syment *table + * Output for sym -[lL] command. 
+ */ + +-#define MODULE_PSEUDO_SYMBOL(sp) \ +- ((STRNEQ((sp)->name, "_MODULE_START_") || STRNEQ((sp)->name, "_MODULE_END_")) || \ +- (STRNEQ((sp)->name, "_MODULE_INIT_START_") || STRNEQ((sp)->name, "_MODULE_INIT_END_")) || \ +- (STRNEQ((sp)->name, "_MODULE_SECTION_"))) ++#define MODULE_PSEUDO_SYMBOL(sp) (STRNEQ((sp)->name, "_MODULE_")) + + #define MODULE_START(sp) (STRNEQ((sp)->name, "_MODULE_START_")) + #define MODULE_END(sp) (STRNEQ((sp)->name, "_MODULE_END_")) +@@ -1280,6 +1313,76 @@ symname_hash_search(struct syment *table + #define MODULE_SECTION_START(sp) (STRNEQ((sp)->name, "_MODULE_SECTION_START")) + #define MODULE_SECTION_END(sp) (STRNEQ((sp)->name, "_MODULE_SECTION_END")) + ++#define MODULE_MEM_START(sp,t) (STRNEQ((sp)->name, module_tag[t].start)) ++#define MODULE_MEM_END(sp,t) (STRNEQ((sp)->name, module_tag[t].end)) ++ ++/* For 6.4 and later */ ++static void ++module_symbol_dump(char *module) ++{ ++ int i, t; ++ struct syment *sp, *sp_end; ++ struct load_module *lm; ++ const char *p1, *p2; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ ++ lm = &st->load_modules[i]; ++ if (module && !STREQ(module, lm->mod_name)) ++ continue; ++ ++ if (received_SIGINT() || output_closed()) ++ return; ++ ++ /* ++ * module percpu symbols are within the .data..percpu section, ++ * not in any module memory regions. ++ */ ++ if (MODULE_PERCPU_SYMS_LOADED(lm)) { ++ p1 = "MODULE PERCPU START"; ++ p2 = lm->mod_name; ++ fprintf(fp, "%lx %s: %s\n", lm->mod_percpu, p1, p2); ++ ++ dump_percpu_symbols(lm); ++ ++ p1 = "MODULE PERCPU END"; ++ fprintf(fp, "%lx %s: %s\n", lm->mod_percpu + lm->mod_percpu_size, p1, p2); ++ } ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ sp = lm->symtable[t]; ++ sp_end = lm->symend[t]; ++ ++ for ( ; sp <= sp_end; sp++) { ++ if (MODULE_PSEUDO_SYMBOL(sp)) { ++ if (MODULE_MEM_START(sp, t)) { ++ p1 = module_tag[t].start_str; ++ p2 = sp->name + strlen(module_tag[t].start); ++ } else if (MODULE_MEM_END(sp, t)) { ++ p1 = module_tag[t].end_str; ++ p2 = sp->name + strlen(module_tag[t].end); ++ } else if (MODULE_SECTION_START(sp)) { ++ p1 = sp->name + strlen("_MODULE_SECTION_START "); ++ p2 = "section start"; ++ } else if (MODULE_SECTION_END(sp)) { ++ p1 = sp->name + strlen("_MODULE_SECTION_END "); ++ p2 = "section end"; ++ } else { ++ p1 = "unknown tag"; ++ p2 = sp->name; ++ } ++ ++ fprintf(fp, "%lx %s: %s\n", sp->value, p1, p2); ++ } else ++ show_symbol(sp, 0, SHOW_RADIX()); ++ } ++ } ++ } ++} ++ + static void + symbol_dump(ulong flags, char *module) + { +@@ -1302,6 +1405,11 @@ symbol_dump(ulong flags, char *module) + if (!(flags & MODULE_SYMS)) + return; + ++ if (MODULE_MEMORY()) { ++ module_symbol_dump(module); ++ return; ++ } ++ + for (i = 0; i < st->mods_installed; i++) { + + lm = &st->load_modules[i]; +@@ -1389,8 +1497,14 @@ dump_percpu_symbols(struct load_module * + struct syment *sp, *sp_end; + + if (MODULE_PERCPU_SYMS_LOADED(lm)) { +- sp = lm->mod_symtable; +- sp_end = lm->mod_symend; ++ if (MODULE_MEMORY()) { ++ /* The lm should have mod_load_symtable. 
*/ ++ sp = lm->mod_load_symtable; ++ sp_end = lm->mod_load_symend; ++ } else { ++ sp = lm->mod_symtable; ++ sp_end = lm->mod_symend; ++ } + for ( ; sp <= sp_end; sp++) { + if (IN_MODULE_PERCPU(sp->value, lm)) + show_symbol(sp, 0, SHOW_RADIX()); +@@ -1425,8 +1539,13 @@ check_for_dups(struct load_module *lm) + { + struct syment *sp, *sp_end; + +- sp = lm->mod_symtable; +- sp_end = lm->mod_symend; ++ if (MODULE_MEMORY()) { ++ sp = lm->mod_load_symtable; ++ sp_end = lm->mod_load_symend; ++ } else { ++ sp = lm->mod_symtable; ++ sp_end = lm->mod_symend; ++ } + + for ( ; sp <= sp_end; sp++) { + if (symbol_name_count(sp->name) > 1) +@@ -1788,6 +1907,362 @@ modsym_value(ulong syms, union kernel_sy + return 0; + } + ++/* ++ * Linux 6.4 introduced module.mem memory layout ++ */ ++void ++store_module_symbols_6_4(ulong total, int mods_installed) ++{ ++ int i, m, t; ++ ulong mod, mod_next; ++ char *mod_name; ++ uint nsyms, ngplsyms; ++ ulong syms, gpl_syms; ++ ulong nksyms; ++ long strbuflen; ++ ulong size; ++ int mcnt, lm_mcnt; ++ union kernel_symbol *modsym; ++ size_t kernel_symbol_size; ++ struct load_module *lm; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char *strbuf = NULL, *modbuf, *modsymbuf; ++ struct syment *sp; ++ ulong first, last; ++ ++ st->mods_installed = mods_installed; ++ ++ if (!st->mods_installed) { ++ st->flags &= ~MODULE_SYMS; ++ return; ++ } ++ ++ /* ++ * If we've been here before, free up everything and start over. ++ */ ++ if (st->flags & MODULE_SYMS) ++ error(FATAL, "re-initialization of module symbols not implemented yet!\n"); ++ ++ kernel_symbol_size = kernel_symbol_type_init(); ++ ++ if ((st->ext_module_symtable = (struct syment *) ++ calloc(total, sizeof(struct syment))) == NULL) ++ error(FATAL, "module syment space malloc (%ld symbols): %s\n", ++ total, strerror(errno)); ++ ++ if (!namespace_ctl(NAMESPACE_INIT, &st->ext_module_namespace, ++ (void *)total, NULL)) ++ error(FATAL, "module namespace malloc: %s\n", strerror(errno)); ++ ++ if ((st->load_modules = (struct load_module *)calloc ++ (st->mods_installed, sizeof(struct load_module))) == NULL) ++ error(FATAL, "load_module array malloc: %s\n", strerror(errno)); ++ ++ modbuf = GETBUF(SIZE(module)); ++ modsymbuf = NULL; ++ m = mcnt = mod_next = 0; ++ ++ for (mod = kt->module_list; mod != kt->kernel_module; mod = mod_next) { ++ ++ readmem(mod, KVADDR, modbuf, SIZE(module), ++ "module buffer", FAULT_ON_ERROR); ++ ++ syms = ULONG(modbuf + OFFSET(module_syms)); ++ gpl_syms = ULONG(modbuf + OFFSET(module_gpl_syms)); ++ nsyms = UINT(modbuf + OFFSET(module_num_syms)); ++ ngplsyms = UINT(modbuf + OFFSET(module_num_gpl_syms)); ++ ++ nksyms = UINT(modbuf + OFFSET(module_num_symtab)); ++ ++ mod_name = modbuf + OFFSET(module_name); ++ ++ lm = &st->load_modules[m++]; ++ BZERO(lm, sizeof(struct load_module)); ++ ++ size = 0; ++ for_each_mod_mem_type(t) { ++ lm->mem[t].base = ULONG(modbuf + OFFSET(module_mem) + ++ SIZE(module_memory) * t + OFFSET(module_memory_base)); ++ lm->mem[t].size = UINT(modbuf + OFFSET(module_mem) + ++ SIZE(module_memory) * t + OFFSET(module_memory_size)); ++ if (t < MOD_INIT_TEXT) ++ size += lm->mem[t].size; ++ } ++ lm->mod_base = lm->mem[MOD_TEXT].base; ++ /* module core size, init not included */ ++ lm->mod_size = size; ++ lm->module_struct = mod; ++ ++ if (strlen(mod_name) < MAX_MOD_NAME) ++ strcpy(lm->mod_name, mod_name); ++ else { ++ error(INFO, "module name greater than MAX_MOD_NAME: %s\n", mod_name); ++ strncpy(lm->mod_name, mod_name, MAX_MOD_NAME-1); ++ } ++ if (CRASHDEBUG(3)) ++ fprintf(fp, "%lx 
(%lx): %s syms: %d gplsyms: %d ksyms: %ld\n", ++ mod, lm->mod_base, lm->mod_name, nsyms, ngplsyms, nksyms); ++ ++ lm->mod_flags = MOD_EXT_SYMS; ++ lm->mod_ext_symcnt = mcnt; ++ lm->mod_text_start = lm->mod_base; ++ lm->mod_init_module_ptr = lm->mem[MOD_INIT_TEXT].base; ++ lm->mod_init_size = lm->mem[MOD_INIT_TEXT].size; ++ lm->mod_init_text_size = lm->mem[MOD_INIT_TEXT].size; ++ ++ if (VALID_MEMBER(module_percpu)) ++ lm->mod_percpu = ULONG(modbuf + OFFSET(module_percpu)); ++ ++ lm_mcnt = mcnt; ++ for_each_mod_mem_type(t) { ++ if (!lm->mem[t].size) ++ continue; ++ ++ st->ext_module_symtable[mcnt].value = lm->mem[t].base; ++ st->ext_module_symtable[mcnt].type = 'm'; ++ st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL; ++ sprintf(buf2, "%s%s", module_tag[t].start, mod_name); ++ namespace_ctl(NAMESPACE_INSTALL, &st->ext_module_namespace, ++ &st->ext_module_symtable[mcnt], buf2); ++ lm_mcnt = mcnt; ++ mcnt++; ++ ++ if (t >= MOD_INIT_TEXT) ++ lm->mod_flags |= MOD_INIT; ++ } ++ ++ if (nsyms && !IN_MODULE(syms, lm)) { ++ error(WARNING, ++ "[%s] module.syms outside of module " "address space (%lx)\n\n", ++ lm->mod_name, syms); ++ nsyms = 0; ++ } ++ ++ if (nsyms) { ++ modsymbuf = GETBUF(kernel_symbol_size*nsyms); ++ readmem((ulong)syms, KVADDR, modsymbuf, ++ nsyms * kernel_symbol_size, ++ "module symbols", FAULT_ON_ERROR); ++ } ++ ++ for (i = first = last = 0; i < nsyms; i++) { ++ modsym = (union kernel_symbol *) ++ (modsymbuf + (i * kernel_symbol_size)); ++ if (!first ++ || first > modsym_name(syms, modsym, i)) ++ first = modsym_name(syms, modsym, i); ++ if (modsym_name(syms, modsym, i) > last) ++ last = modsym_name(syms, modsym, i); ++ } ++ ++ if (last > first) { ++ /* The buffer should not go over the block. */ ++ ulong end = module_mem_end(first, lm); ++ ++ strbuflen = (last-first) + BUFSIZE; ++ if ((first + strbuflen) >= end) { ++ strbuflen = end - first; ++ ++ } ++ strbuf = GETBUF(strbuflen); ++ ++ if (!readmem(first, KVADDR, strbuf, strbuflen, ++ "module symbol strings", RETURN_ON_ERROR)) { ++ FREEBUF(strbuf); ++ strbuf = NULL; ++ } ++ } ++ ++ ++ for (i = 0; i < nsyms; i++) { ++ modsym = (union kernel_symbol *)(modsymbuf + (i * kernel_symbol_size)); ++ ++ BZERO(buf1, BUFSIZE); ++ ++ if (strbuf) ++ strcpy(buf1, &strbuf[modsym_name(syms, modsym, i) - first]); ++ else ++ read_string(modsym_name(syms, modsym, i), buf1, BUFSIZE-1); ++ ++ if (strlen(buf1)) { ++ st->ext_module_symtable[mcnt].value = ++ modsym_value(syms, modsym, i); ++ st->ext_module_symtable[mcnt].type = '?'; ++ st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL; ++ strip_module_symbol_end(buf1); ++ strip_symbol_end(buf1, NULL); ++ namespace_ctl(NAMESPACE_INSTALL, ++ &st->ext_module_namespace, ++ &st->ext_module_symtable[mcnt], buf1); ++ ++ mcnt++; ++ } ++ } ++ ++ if (modsymbuf) { ++ FREEBUF(modsymbuf); ++ modsymbuf = NULL; ++ } ++ ++ if (strbuf) ++ FREEBUF(strbuf); ++ ++ if (ngplsyms) { ++ modsymbuf = GETBUF(kernel_symbol_size * ngplsyms); ++ readmem((ulong)gpl_syms, KVADDR, modsymbuf, ++ ngplsyms * kernel_symbol_size, ++ "module gpl symbols", FAULT_ON_ERROR); ++ } ++ ++ for (i = first = last = 0; i < ngplsyms; i++) { ++ modsym = (union kernel_symbol *) ++ (modsymbuf + (i * kernel_symbol_size)); ++ if (!first ++ || first > modsym_name(gpl_syms, modsym, i)) ++ first = modsym_name(gpl_syms, modsym, i); ++ if (modsym_name(gpl_syms, modsym, i) > last) ++ last = modsym_name(gpl_syms, modsym, i); ++ } ++ ++ if (last > first) { ++ ulong end = module_mem_end(first, lm); ++ ++ strbuflen = (last-first) + BUFSIZE; ++ if ((first + 
strbuflen) >= end) { ++ strbuflen = end - first; ++ ++ } ++ strbuf = GETBUF(strbuflen); ++ ++ if (!readmem(first, KVADDR, strbuf, strbuflen, ++ "module gpl symbol strings", RETURN_ON_ERROR)) { ++ FREEBUF(strbuf); ++ strbuf = NULL; ++ } ++ } else ++ strbuf = NULL; ++ ++ for (i = 0; i < ngplsyms; i++) { ++ modsym = (union kernel_symbol *) (modsymbuf + (i * kernel_symbol_size)); ++ ++ BZERO(buf1, BUFSIZE); ++ ++ if (strbuf) ++ strcpy(buf1, &strbuf[modsym_name(gpl_syms, modsym, i) - first]); ++ else ++ read_string(modsym_name(gpl_syms, modsym, i), buf1, BUFSIZE-1); ++ ++ if (strlen(buf1)) { ++ st->ext_module_symtable[mcnt].value = ++ modsym_value(gpl_syms, modsym, i); ++ st->ext_module_symtable[mcnt].type = '?'; ++ st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL; ++ strip_module_symbol_end(buf1); ++ strip_symbol_end(buf1, NULL); ++ namespace_ctl(NAMESPACE_INSTALL, ++ &st->ext_module_namespace, ++ &st->ext_module_symtable[mcnt], buf1); ++ ++ mcnt++; ++ } ++ } ++ ++ if (modsymbuf) { ++ FREEBUF(modsymbuf); ++ modsymbuf = NULL; ++ } ++ ++ if (strbuf) ++ FREEBUF(strbuf); ++ ++ /* ++ * If the module was compiled with kallsyms, add them in. ++ */ ++ switch (kt->flags & (KALLSYMS_V1|KALLSYMS_V2)) ++ { ++ case KALLSYMS_V1: /* impossible, I hope... */ ++ mcnt += store_module_kallsyms_v1(lm, lm_mcnt, mcnt, modbuf); ++ break; ++ case KALLSYMS_V2: ++ mcnt += store_module_kallsyms_v2(lm, lm_mcnt, mcnt, modbuf); ++ break; ++ } ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->mem[t].size) ++ continue; ++ ++ st->ext_module_symtable[mcnt].value = lm->mem[t].base + lm->mem[t].size; ++ st->ext_module_symtable[mcnt].type = 'm'; ++ st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL; ++ sprintf(buf2, "%s%s", module_tag[t].end, mod_name); ++ namespace_ctl(NAMESPACE_INSTALL, ++ &st->ext_module_namespace, ++ &st->ext_module_symtable[mcnt], buf2); ++ mcnt++; ++ } ++ ++ lm->mod_ext_symcnt = mcnt - lm->mod_ext_symcnt; ++ ++ NEXT_MODULE(mod_next, modbuf); ++ } ++ ++ FREEBUF(modbuf); ++ ++ st->ext_module_symcnt = mcnt; ++ st->ext_module_symend = &st->ext_module_symtable[mcnt]; ++ ++ namespace_ctl(NAMESPACE_COMPLETE, &st->ext_module_namespace, ++ st->ext_module_symtable, st->ext_module_symend); ++ ++ qsort(st->ext_module_symtable, mcnt, sizeof(struct syment), ++ compare_syms); ++ ++ /* sort by text base address */ ++ qsort(st->load_modules, m, sizeof(struct load_module), compare_mods); ++ ++ for (m = 0; m < st->mods_installed; m++) { ++ lm = &st->load_modules[m]; ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->mem[t].size) ++ continue; ++ ++ sprintf(buf1, "%s%s", module_tag[t].start, lm->mod_name); ++ sprintf(buf2, "%s%s", module_tag[t].end, lm->mod_name); ++ ++ for (sp = st->ext_module_symtable; sp < st->ext_module_symend; sp++) { ++ if (STREQ(sp->name, buf1)) { ++ lm->ext_symtable[t] = sp; ++ break; ++ } ++ } ++ for ( ; sp < st->ext_module_symend; sp++) { ++ if (STREQ(sp->name, buf2)) { ++ lm->ext_symend[t] = sp; ++ break; ++ } ++ } ++ ++ if (lm->ext_symtable[t] && lm->ext_symend[t]) ++ mod_symtable_hash_install_range(lm->ext_symtable[t], lm->ext_symend[t]); ++ } ++ lm->symtable = lm->ext_symtable; ++ lm->symend = lm->ext_symend; ++ } ++ ++ st->flags |= MODULE_SYMS; ++ ++ if (CRASHDEBUG(2)) { ++ for (sp = st->ext_module_symtable; sp < st->ext_module_symend; sp++) ++ fprintf(fp, "%16lx %s\n", sp->value, sp->name); ++ } ++ ++ if (mcnt > total) ++ error(FATAL, "store_module_symbols_6_4: total: %ld mcnt: %d\n", total, mcnt); ++} ++ + void + store_module_symbols_v2(ulong total, int mods_installed) + { +@@ -2384,6 +2859,7 @@ 
store_module_kallsyms_v2(struct load_mod + int mcnt; + int mcnt_idx; + char *module_buf_init = NULL; ++ ulong base, base_init, size, size_init; + + if (!(kt->flags & KALLSYMS_V2)) + return 0; +@@ -2394,9 +2870,22 @@ store_module_kallsyms_v2(struct load_mod + ns = &st->ext_module_namespace; + ec = &elf_common; + +- module_buf = GETBUF(lm->mod_size); ++ /* kallsyms data looks to be in MOD_DATA region. */ ++ if (MODULE_MEMORY()) { ++ base = lm->mem[MOD_DATA].base; ++ size = lm->mem[MOD_DATA].size; ++ base_init = lm->mem[MOD_INIT_DATA].base; ++ size_init = lm->mem[MOD_INIT_DATA].size; ++ } else { ++ base = lm->mod_base; ++ size = lm->mod_size; ++ base_init = lm->mod_init_module_ptr; ++ size_init = lm->mod_init_size; ++ } ++ ++ module_buf = GETBUF(size); + +- if (!readmem(lm->mod_base, KVADDR, module_buf, lm->mod_size, ++ if (!readmem(base, KVADDR, module_buf, size, + "module (kallsyms)", RETURN_ON_ERROR|QUIET)) { + error(WARNING,"cannot access module kallsyms\n"); + FREEBUF(module_buf); +@@ -2404,10 +2893,10 @@ store_module_kallsyms_v2(struct load_mod + } + + if (lm->mod_init_size > 0) { +- module_buf_init = GETBUF(lm->mod_init_size); ++ module_buf_init = GETBUF(size_init); + +- if (!readmem(lm->mod_init_module_ptr, KVADDR, module_buf_init, lm->mod_init_size, +- "module init (kallsyms)", RETURN_ON_ERROR|QUIET)) { ++ if (!readmem(base_init, KVADDR, module_buf_init, size_init, ++ "module init (kallsyms)", RETURN_ON_ERROR|QUIET)) { + error(WARNING,"cannot access module init kallsyms\n"); + FREEBUF(module_buf_init); + } +@@ -2429,9 +2918,9 @@ store_module_kallsyms_v2(struct load_mod + return 0; + } + if (IN_MODULE(ksymtab, lm)) +- locsymtab = module_buf + (ksymtab - lm->mod_base); ++ locsymtab = module_buf + (ksymtab - base); + else +- locsymtab = module_buf_init + (ksymtab - lm->mod_init_module_ptr); ++ locsymtab = module_buf_init + (ksymtab - base_init); + + kstrtab = ULONG(modbuf + OFFSET(module_strtab)); + if (!IN_MODULE(kstrtab, lm) && !IN_MODULE_INIT(kstrtab, lm)) { +@@ -2444,9 +2933,9 @@ store_module_kallsyms_v2(struct load_mod + return 0; + } + if (IN_MODULE(kstrtab, lm)) +- locstrtab = module_buf + (kstrtab - lm->mod_base); ++ locstrtab = module_buf + (kstrtab - base); + else +- locstrtab = module_buf_init + (kstrtab - lm->mod_init_module_ptr); ++ locstrtab = module_buf_init + (kstrtab - base_init); + + for (i = 1; i < nksyms; i++) { /* ELF starts real symbols at 1 */ + switch (BITS()) +@@ -2461,11 +2950,8 @@ store_module_kallsyms_v2(struct load_mod + break; + } + +- if (((ec->st_value < lm->mod_base) || +- (ec->st_value > (lm->mod_base + lm->mod_size))) && +- ((ec->st_value < lm->mod_init_module_ptr) || +- (ec->st_value > (lm->mod_init_module_ptr + lm->mod_init_size)))) +- continue; ++ if (!IN_MODULE(ec->st_value, lm) && !IN_MODULE_INIT(ec->st_value, lm)) ++ continue; + + if (ec->st_shndx == SHN_UNDEF) + continue; +@@ -2572,7 +3058,7 @@ strip_module_symbol_end(char *buf) + ulong + lowest_module_address(void) + { +- int i; ++ int i, t; + struct load_module *lm; + ulong low, lowest; + +@@ -2582,9 +3068,20 @@ lowest_module_address(void) + lowest = (ulong)(-1); + for (i = 0; i < st->mods_installed; i++) { + lm = &st->load_modules[i]; +- low = lm->mod_base; +- if (low < lowest) +- lowest = low; ++ if (MODULE_MEMORY()) ++ for_each_mod_mem_type(t) { ++ if (!lm->mem[t].size) ++ continue; ++ ++ low = lm->mem[t].base; ++ if (low < lowest) ++ lowest = low; ++ } ++ else { ++ low = lm->mod_base; ++ if (low < lowest) ++ lowest = low; ++ } + } + + return lowest; +@@ -2593,16 +3090,27 @@ 
lowest_module_address(void) + ulong + highest_module_address(void) + { +- int i; ++ int i, t; + struct load_module *lm; + ulong high, highest; + + highest = 0; + for (i = 0; i < st->mods_installed; i++) { + lm = &st->load_modules[i]; +- high = lm->mod_base + lm->mod_size; +- if (high > highest) +- highest = high; ++ if (MODULE_MEMORY()) { ++ for_each_mod_mem_type(t) { ++ if (!lm->mem[t].size) ++ continue; ++ ++ high = lm->mem[t].base + lm->mem[t].size; ++ if (high > highest) ++ highest = high; ++ } ++ } else { ++ high = lm->mod_base + lm->mod_size; ++ if (high > highest) ++ highest = high; ++ } + } + + return highest; +@@ -2853,7 +3361,8 @@ compare_syms(const void *v1, const void + return -1; + if (STRNEQ(s2->name, "__insmod")) + return 1; +- if (STRNEQ(s2->name, "_MODULE_START_")) ++ if (MODULE_MEM_START(s2, MOD_TEXT) || ++ STRNEQ(s2->name, "_MODULE_START_")) + return 1; + /* Get pseudo section name. */ + if (MODULE_SECTION_START(s1)) +@@ -2986,13 +3495,19 @@ is_kernel_text(ulong value) + if (!(lm->mod_section_data[s].flags & SEC_CODE)) + continue; + +- start = lm->mod_base + +- lm->mod_section_data[s].offset; ++ if (MODULE_MEMORY()) ++ start = lm->mod_section_data[s].addr; ++ else ++ start = lm->mod_base + lm->mod_section_data[s].offset; ++ + end = start + lm->mod_section_data[s].size; + + if ((value >= start) && (value < end)) + return TRUE; + } ++ } else if (MODULE_MEMORY()) { ++ if (IN_MODULE_TEXT(value, lm)) ++ return TRUE; + } else { + switch (kt->flags & (KMOD_V1|KMOD_V2)) + { +@@ -3531,22 +4046,42 @@ dump_symbol_table(void) + (ulong)lm->mod_section_data, + lm->mod_section_data ? "" : "(not allocated)"); + ++ if (MODULE_MEMORY()) { ++ int t; ++ for_each_mod_mem_type(t) { ++ fprintf(fp, " mem[%d]: %lx (%x)\n", ++ t, lm->mem[t].base, lm->mem[t].size); ++ } ++ fprintf(fp, " symtable: %lx\n", (ulong)lm->symtable); ++ fprintf(fp, " ext_symtable: %lx\n", (ulong)lm->ext_symtable); ++ for_each_mod_mem_type(t) { ++ fprintf(fp, " ext_symtable[%d]: %lx - %lx\n", ++ t, (ulong)lm->ext_symtable[t], (ulong)lm->ext_symend[t]); ++ } ++ fprintf(fp, " load_symtable: %lx\n", (ulong)lm->load_symtable); ++ for_each_mod_mem_type(t) { ++ fprintf(fp, " load_symtable[%d]: %lx - %lx\n", ++ t, (ulong)lm->load_symtable[t], (ulong)lm->load_symend[t]); ++ } ++ } + + for (s = 0; s < lm->mod_sections; s++) { + fprintf(fp, +- " %12s prio: %x flags: %05x offset: %-8lx size: %lx\n", ++ " %20s prio: %x flags: %08x %s: %-16lx size: %lx\n", + lm->mod_section_data[s].name, + lm->mod_section_data[s].priority, + lm->mod_section_data[s].flags, +- lm->mod_section_data[s].offset, ++ MODULE_MEMORY() ? "addr" : "offset", ++ MODULE_MEMORY() ? 
lm->mod_section_data[s].addr : ++ lm->mod_section_data[s].offset, + lm->mod_section_data[s].size); + } + + fprintf(fp, " loaded_objfile: %lx\n", (ulong)lm->loaded_objfile); + +- if (CRASHDEBUG(1)) { ++ if (CRASHDEBUG(1) && lm->mod_load_symtable) { + for (sp = lm->mod_load_symtable; +- sp < lm->mod_load_symend; sp++) { ++ sp <= lm->mod_load_symend; sp++) { + fprintf(fp, " %lx %s\n", + sp->value, sp->name); + } +@@ -4448,8 +4983,11 @@ get_section(ulong vaddr, char *buf) + if (module_symbol(vaddr, NULL, &lm, NULL, *gdb_output_radix)) { + if (lm->mod_flags & MOD_LOAD_SYMS) { + for (i = (lm->mod_sections-1); i >= 0; i--) { +- start = lm->mod_base + +- lm->mod_section_data[i].offset; ++ if (MODULE_MEMORY()) ++ start = lm->mod_section_data[i].addr; ++ else ++ start = lm->mod_base + ++ lm->mod_section_data[i].offset; + end = start + lm->mod_section_data[i].size; + + if ((vaddr >= start) && (vaddr < end)) { +@@ -4504,7 +5042,7 @@ get_build_directory(char *buf) + int + symbol_query(char *s, char *print_pad, struct syment **spp) + { +- int i; ++ int i, t; + struct syment *sp, *sp_end; + struct load_module *lm; + int cnt, search_init; +@@ -4524,6 +5062,60 @@ symbol_query(char *s, char *print_pad, s + } + } + ++ if (!MODULE_MEMORY()) ++ goto old_module; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ if (lm->mod_flags & MOD_LOAD_SYMS) { ++ sp = lm->mod_load_symtable; ++ sp_end = lm->mod_load_symend; ++ ++ for (; sp < sp_end; sp++) { ++ if (MODULE_PSEUDO_SYMBOL(sp)) ++ continue; ++ ++ if (strstr(sp->name, s)) { ++ if (print_pad) { ++ if (strlen(print_pad)) ++ fprintf(fp, "%s", print_pad); ++ show_symbol(sp, 0, SHOW_RADIX()|SHOW_MODULE); ++ } ++ if (spp) ++ *spp = sp; ++ cnt++; ++ } ++ } ++ } else { ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ sp = lm->symtable[t]; ++ sp_end = lm->symend[t]; ++ ++ for (; sp < sp_end; sp++) { ++ if (MODULE_PSEUDO_SYMBOL(sp)) ++ continue; ++ ++ if (strstr(sp->name, s)) { ++ if (print_pad) { ++ if (strlen(print_pad)) ++ fprintf(fp, "%s", print_pad); ++ show_symbol(sp, 0, SHOW_RADIX()|SHOW_MODULE); ++ } ++ if (spp) ++ *spp = sp; ++ cnt++; ++ } ++ } ++ } ++ } ++ } ++ return cnt; ++ ++old_module: + search_init = FALSE; + + for (i = 0; i < st->mods_installed; i++) { +@@ -4629,7 +5221,7 @@ symbol_search(char *s) + int + symbol_name_count(char *s) + { +- int i; ++ int i, t; + struct syment *sp, *sp_end; + struct load_module *lm; + int count, pseudos, search_init; +@@ -4643,6 +5235,37 @@ symbol_name_count(char *s) + } + } + ++ if (!MODULE_MEMORY()) ++ goto old_module; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ if (lm->mod_flags & MOD_LOAD_SYMS) { ++ sp = lm->mod_load_symtable; ++ sp_end = lm->mod_load_symend; ++ ++ for (; sp < sp_end; sp++) { ++ if (STREQ(s, sp->name)) ++ count++; ++ } ++ } else { ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ sp = lm->symtable[t]; ++ sp_end = lm->symend[t]; ++ for (; sp < sp_end; sp++) { ++ if (STREQ(s, sp->name)) ++ count++; ++ } ++ } ++ } ++ } ++ return count++; ++ ++old_module: + pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_")); + search_init = FALSE; + +@@ -4692,7 +5315,7 @@ symbol_name_count(char *s) + struct syment * + symbol_search_next(char *s, struct syment *spstart) + { +- int i; ++ int i, t; + struct syment *sp, *sp_end; + struct load_module *lm; + int found_start; +@@ -4712,6 +5335,38 @@ symbol_search_next(char *s, struct symen + } + } + ++ if (!MODULE_MEMORY()) ++ goto old_module; 
++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ sp = lm->symtable[t]; ++ sp_end = lm->symend[t]; ++ ++ if (!found_start && (spstart < sp || spstart > sp_end)) ++ continue; ++ ++ for ( ; sp < sp_end; sp++) { ++ if (sp == spstart) { ++ found_start = TRUE; ++ continue; ++ } else if (!found_start) ++ continue; ++ ++ if (STREQ(s, sp->name)) ++ return sp; ++ } ++ } ++ } ++ ++ return NULL; ++ ++old_module: + pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_")); + search_init = FALSE; + +@@ -4821,17 +5476,29 @@ module_symbol(ulong value, + for (i = 0; i < st->mods_installed; i++) { + lm = &st->load_modules[i]; + +- if (IN_MODULE(value, lm)) { +- base = lm->mod_base; +- end = lm->mod_base + lm->mod_size; +- } else if (IN_MODULE_INIT(value, lm)) { +- base = lm->mod_init_module_ptr; +- end = lm->mod_init_module_ptr + lm->mod_init_size; +- } else if (IN_MODULE_PERCPU(value, lm)) { +- base = lm->mod_percpu; +- end = lm->mod_percpu + lm->mod_percpu_size; +- } else +- continue; ++ if (MODULE_MEMORY()) { ++ if (IN_MODULE(value, lm) || IN_MODULE_INIT(value, lm)) { ++ int type = module_mem_type(value, lm); ++ base = lm->mem[type].base; ++ end = base + lm->mem[type].size; ++ } else if (IN_MODULE_PERCPU(value, lm)) { ++ base = lm->mod_percpu; ++ end = lm->mod_percpu + lm->mod_percpu_size; ++ } else ++ continue; ++ } else { ++ if (IN_MODULE(value, lm)) { ++ base = lm->mod_base; ++ end = lm->mod_base + lm->mod_size; ++ } else if (IN_MODULE_INIT(value, lm)) { ++ base = lm->mod_init_module_ptr; ++ end = lm->mod_init_module_ptr + lm->mod_init_size; ++ } else if (IN_MODULE_PERCPU(value, lm)) { ++ base = lm->mod_percpu; ++ end = lm->mod_percpu + lm->mod_percpu_size; ++ } else ++ continue; ++ } + + if ((value >= base) && (value < end)) { + if (lmp) +@@ -4867,6 +5534,71 @@ module_symbol(ulong value, + return FALSE; + } + ++static struct syment * ++value_search_module_6_4(ulong value, ulong *offset) ++{ ++ int i, t; ++ struct syment *sp, *sp_end, *spnext, *splast; ++ struct load_module *lm; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ if (!IN_MODULE(value, lm) && !IN_MODULE_INIT(value, lm)) ++ continue; ++ ++ for_each_mod_mem_type(t) { ++ sp = lm->symtable[t]; ++ sp_end = lm->symend[t]; ++ ++ if (value < sp->value) ++ continue; ++ ++ splast = NULL; ++ for ( ; sp <= sp_end; sp++) { ++ if (machine_type("ARM64") && ++ IN_MODULE_PERCPU(sp->value, lm) && ++ !IN_MODULE_PERCPU(value, lm)) ++ continue; ++ ++ if (value == sp->value) { ++ if (MODULE_MEM_END(sp, t)) ++ break; ++ ++ if (MODULE_PSEUDO_SYMBOL(sp)) { ++ spnext = sp + 1; ++ if (MODULE_PSEUDO_SYMBOL(spnext)) ++ continue; ++ if (spnext->value == value) ++ sp = spnext; ++ } ++ if (sp->name[0] == '.') { ++ spnext = sp+1; ++ if (spnext->value == value) ++ sp = spnext; ++ } ++ if (offset) ++ *offset = 0; ++ return sp; ++ } ++ ++ if (sp->value > value) { ++ sp = splast ? 
splast : sp - 1; ++ if (offset) ++ *offset = value - sp->value; ++ return sp; ++ } ++ ++ if (!MODULE_PSEUDO_SYMBOL(sp)) { ++ splast = sp; ++ } ++ } ++ } ++ } ++ ++ return NULL; ++} ++ + struct syment * + value_search_module(ulong value, ulong *offset) + { +@@ -4875,6 +5607,9 @@ value_search_module(ulong value, ulong * + struct load_module *lm; + int search_init_sections, search_init; + ++ if (MODULE_MEMORY()) ++ return value_search_module_6_4(value, offset); ++ + search_init = FALSE; + search_init_sections = 0; + +@@ -5193,6 +5928,99 @@ closest_symbol_value(ulong value) + return(0); + } + ++/* Only for 6.4 and later */ ++static struct syment * ++next_symbol_by_symname(char *symbol) ++{ ++ struct syment *sp; ++ ++ if ((sp = symbol_search(symbol))) { ++ sp++; ++ if (MODULE_PSEUDO_SYMBOL(sp) && strstr(sp->name, "_END")) ++ return next_module_symbol_by_value(sp->value); ++ ++ return sp; ++ } ++ ++ return NULL; ++} ++ ++/* val_in should be a pseudo module end symbol. */ ++static struct syment * ++next_module_symbol_by_value(ulong val_in) ++{ ++ struct load_module *lm; ++ struct syment *sp, *sp_end; ++ ulong start, min; ++ int i, t; ++ ++retry: ++ sp = sp_end = NULL; ++ min = (ulong)-1; ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ /* Search for the next lowest symtable. */ ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ start = lm->symtable[t]->value; ++ if (start > val_in && start < min) { ++ min = start; ++ sp = lm->symtable[t]; ++ sp_end = lm->symend[t]; ++ } ++ } ++ } ++ ++ if (!sp) ++ return NULL; ++ ++ for ( ; sp < sp_end; sp++) { ++ if (MODULE_PSEUDO_SYMBOL(sp)) ++ continue; ++ if (sp->value > val_in) ++ return sp; ++ } ++ ++ /* Found a table that has only pseudo symbols. */ ++ val_in = sp_end->value; ++ goto retry; ++} ++ ++/* Only for 6.4 and later */ ++static struct syment * ++next_module_symbol_by_syment(struct syment *sp_in) ++{ ++ struct load_module *lm; ++ struct syment *sp; ++ int i, t; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ if (sp_in < lm->symtable[t] || sp_in > lm->symend[t]) ++ continue; ++ ++ if (sp_in == lm->symend[t]) ++ return next_module_symbol_by_value(sp_in->value); ++ ++ sp = sp_in + 1; ++ if (MODULE_PSEUDO_SYMBOL(sp)) ++ return next_module_symbol_by_value(sp->value); ++ ++ return sp; ++ } ++ } ++ ++ return NULL; ++} ++ + /* + * For a given symbol, return a pointer to the next (higher) symbol's syment. + * Either a symbol name or syment pointer may be passed as an argument. +@@ -5220,6 +6048,9 @@ next_symbol(char *symbol, struct syment + } + } + ++ if (MODULE_MEMORY()) ++ return next_module_symbol_by_syment(sp_in); ++ + search_init = FALSE; + + for (i = 0; i < st->mods_installed; i++) { +@@ -5263,6 +6094,8 @@ next_symbol(char *symbol, struct syment + return NULL; + } + ++ if (MODULE_MEMORY()) ++ return next_symbol_by_symname(symbol); + + /* + * Deal with a few special cases... 
+@@ -5302,6 +6135,106 @@ next_symbol(char *symbol, struct syment + return NULL; + } + ++/* Only for 6.4 and later */ ++static struct syment * ++prev_symbol_by_symname(char *symbol) ++{ ++ struct syment *sp; ++ ++ if ((sp = symbol_search(symbol))) { ++ if (sp == st->symtable) ++ return NULL; ++ ++ if (module_symbol(sp->value, NULL, NULL, NULL, 0)) { ++ if (MODULE_PSEUDO_SYMBOL(sp) && strstr(sp->name, "_START")) ++ return prev_module_symbol_by_value(sp->value); ++ else ++ sp--; ++ } else ++ sp--; ++ ++ return sp; ++ } ++ ++ return NULL; ++} ++ ++/* val_in should be a pseudo module start symbol. */ ++static struct syment * ++prev_module_symbol_by_value(ulong val_in) ++{ ++ struct load_module *lm; ++ struct syment *sp, *sp_end; ++ ulong end, max; ++ int i, t; ++ ++retry: ++ sp = sp_end = NULL; ++ max = 0; ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ /* Search for the previous highest table. */ ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ end = lm->symend[t]->value; ++ if (end < val_in && end > max) { ++ max = end; ++ sp = lm->symtable[t]; ++ sp_end = lm->symend[t]; ++ } ++ } ++ } ++ ++ if (!sp) ++ return NULL; ++ ++ for ( ; sp_end > sp; sp_end--) { ++ if (MODULE_PSEUDO_SYMBOL(sp_end)) ++ continue; ++ if (sp_end->value < val_in) ++ return sp_end; ++ } ++ ++ /* Found a table that has only pseudo symbols. */ ++ val_in = sp->value; ++ goto retry; ++} ++ ++/* Only for 6.4 and later */ ++static struct syment * ++prev_module_symbol_by_syment(struct syment *sp_in) ++{ ++ struct load_module *lm; ++ struct syment *sp; ++ int i, t; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ if (sp_in < lm->symtable[t] || sp_in > lm->symend[t]) ++ continue; ++ ++ if (sp_in == lm->symtable[t]) ++ return prev_module_symbol_by_value(sp_in->value); ++ ++ sp = sp_in - 1; ++ if (MODULE_PSEUDO_SYMBOL(sp)) ++ return prev_module_symbol_by_value(sp->value); ++ ++ return sp; ++ } ++ } ++ ++ return NULL; ++} ++ + /* + * For a given symbol, return a pointer to the previous (lower) symbol's syment. + * Either a symbol name or syment pointer may be passed as an argument. 
+@@ -5325,6 +6258,9 @@ prev_symbol(char *symbol, struct syment + sp_prev = sp; + } + ++ if (MODULE_MEMORY()) ++ return prev_module_symbol_by_syment(sp_in); ++ + search_init = FALSE; + + for (i = 0; i < st->mods_installed; i++) { +@@ -5368,6 +6304,9 @@ prev_symbol(char *symbol, struct syment + return NULL; + } + ++ if (MODULE_MEMORY()) ++ return prev_symbol_by_symname(symbol); ++ + if (strstr(symbol, " module)")) { + sprintf(buf, "_MODULE_START_"); + strcat(buf, &symbol[1]); +@@ -5590,7 +6529,7 @@ kernel_symbol_search(char *symbol) + int + get_syment_array(char *symbol, struct syment **sp_array, int max) + { +- int i, cnt; ++ int i, cnt, t; + struct syment *sp, *sp_end; + struct load_module *lm; + +@@ -5615,6 +6554,31 @@ get_syment_array(char *symbol, struct sy + } + } + ++ if (!MODULE_MEMORY()) ++ goto old_module; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ ++ sp = lm->symtable[t]; ++ sp_end = lm->symend[t]; ++ for (; sp < sp_end; sp++) { ++ if (STREQ(symbol, sp->name)) { ++ if (max && (cnt < max)) ++ sp_array[cnt] = sp; ++ cnt++; ++ } ++ } ++ } ++ } ++ ++ return cnt; ++ ++old_module: + for (i = 0; i < st->mods_installed; i++) { + lm = &st->load_modules[i]; + sp = lm->mod_symtable; +@@ -9218,6 +10182,9 @@ dump_offset_table(char *spec, ulong make + OFFSET(module_strtab)); + fprintf(fp, " module_percpu: %ld\n", + OFFSET(module_percpu)); ++ fprintf(fp, " module_mem: %ld\n", OFFSET(module_mem)); ++ fprintf(fp, " module_memory_base: %ld\n", OFFSET(module_memory_base)); ++ fprintf(fp, " module_memory_size: %ld\n", OFFSET(module_memory_size)); + + fprintf(fp, " module_sect_attrs: %ld\n", + OFFSET(module_sect_attrs)); +@@ -10832,6 +11799,7 @@ dump_offset_table(char *spec, ulong make + SIZE(super_block)); + fprintf(fp, " irqdesc: %ld\n", SIZE(irqdesc)); + fprintf(fp, " module: %ld\n", SIZE(module)); ++ fprintf(fp, " module_memory: %ld\n", SIZE(module_memory)); + fprintf(fp, " module_sect_attr: %ld\n", SIZE(module_sect_attr)); + fprintf(fp, " list_head: %ld\n", SIZE(list_head)); + fprintf(fp, " hlist_head: %ld\n", SIZE(hlist_head)); +@@ -11447,6 +12415,13 @@ store_section_data(struct load_module *l + lm->mod_section_data[i].section = section; + lm->mod_section_data[i].priority = prio; + lm->mod_section_data[i].flags = section->flags & ~SEC_FOUND; ++ lm->mod_section_data[i].size = bfd_section_size(section); ++ lm->mod_section_data[i].offset = 0; ++ lm->mod_section_data[i].addr = 0; ++ if (strlen(name) < MAX_MOD_SEC_NAME) ++ strcpy(lm->mod_section_data[i].name, name); ++ else ++ strncpy(lm->mod_section_data[i].name, name, MAX_MOD_SEC_NAME-1); + /* + * The percpu section isn't included in kallsyms or module_core area. 
+ */ +@@ -11454,13 +12429,8 @@ store_section_data(struct load_module *l + (STREQ(name,".data.percpu") || STREQ(name, ".data..percpu"))) { + lm->mod_percpu_size = bfd_section_size(section); + lm->mod_section_data[i].flags |= SEC_FOUND; ++ lm->mod_section_data[i].addr = lm->mod_percpu; + } +- lm->mod_section_data[i].size = bfd_section_size(section); +- lm->mod_section_data[i].offset = 0; +- if (strlen(name) < MAX_MOD_SEC_NAME) +- strcpy(lm->mod_section_data[i].name, name); +- else +- strncpy(lm->mod_section_data[i].name, name, MAX_MOD_SEC_NAME-1); + lm->mod_sections += 1; + } + +@@ -11702,6 +12672,124 @@ calculate_load_order_v2(struct load_modu + } + } + ++/* Linux 6.4 and later */ ++static void ++calculate_load_order_6_4(struct load_module *lm, bfd *bfd, int dynamic, ++ void *minisyms, long symcount, unsigned int size) ++{ ++ struct syment *s1, *s2; ++ ulong sec_start; ++ bfd_byte *from, *fromend; ++ asymbol *store; ++ asymbol *sym; ++ symbol_info syminfo; ++ char *secname; ++ int i, t; ++ ++ if ((store = bfd_make_empty_symbol(bfd)) == NULL) ++ error(FATAL, "bfd_make_empty_symbol() failed\n"); ++ ++ for_each_mod_mem_type(t) { ++ s1 = lm->symtable[t]; ++ s2 = lm->symend[t]; ++ while (s1 < s2) { ++ ++ if (MODULE_PSEUDO_SYMBOL(s1)) { ++ s1++; ++ continue; ++ } ++ ++ /* Skip over symbols whose sections have been identified. */ ++ for (i = 0; i < lm->mod_sections; i++) { ++ if ((lm->mod_section_data[i].flags & SEC_FOUND) == 0) ++ continue; ++ ++ if (s1->value >= lm->mod_section_data[i].addr && ++ s1->value < lm->mod_section_data[i].addr ++ + lm->mod_section_data[i].size) ++ break; ++ } ++ ++ /* Matched one of the sections. Skip symbol. */ ++ if (i < lm->mod_sections) { ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "skip %lx %s %s\n", s1->value, s1->name, ++ lm->mod_section_data[i].name); ++ s1++; ++ continue; ++ } ++ ++ /* Find the symbol in the object file. */ ++ from = (bfd_byte *) minisyms; ++ fromend = from + symcount * size; ++ secname = NULL; ++ for (; from < fromend; from += size) { ++ if (!(sym = bfd_minisymbol_to_symbol(bfd, dynamic, from, store))) ++ error(FATAL, "bfd_minisymbol_to_symbol() failed\n"); ++ ++ bfd_get_symbol_info(bfd, sym, &syminfo); ++ if (CRASHDEBUG(3)) { ++ fprintf(fp,"matching sym %s %lx against bfd %s %lx\n", ++ s1->name, (long) s1->value, syminfo.name, ++ (long) syminfo.value); ++ } ++ if (strcmp(syminfo.name, s1->name) == 0) { ++ secname = (char *)bfd_section_name(sym->section); ++ break; ++ } ++ ++ } ++ if (secname == NULL) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "symbol %s not found in module\n", s1->name); ++ s1++; ++ continue; ++ } ++ ++ /* Match the section it came in. */ ++ for (i = 0; i < lm->mod_sections; i++) { ++ if (STREQ(lm->mod_section_data[i].name, secname)) ++ break; ++ } ++ ++ if (i == lm->mod_sections) { ++ fprintf(fp, "?? 
Section %s not found for symbol %s\n", ++ secname, s1->name); ++ s1++; ++ continue; ++ } ++ ++ if (lm->mod_section_data[i].flags & SEC_FOUND) { ++ s1++; ++ continue; ++ } ++ ++ /* Update the offset information for the section */ ++ sec_start = s1->value - syminfo.value; ++ /* keep the address instead of offset */ ++ lm->mod_section_data[i].addr = sec_start; ++ lm->mod_section_data[i].flags |= SEC_FOUND; ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "update sec offset sym %s @ %lx val %lx section %s\n", ++ s1->name, s1->value, (ulong)syminfo.value, secname); ++ ++ if (strcmp(secname, ".text") == 0) ++ lm->mod_text_start = sec_start; ++ ++ if (strcmp(secname, ".bss") == 0) ++ lm->mod_bss_start = sec_start; ++ ++ if (strcmp(secname, ".data") == 0) ++ lm->mod_data_start = sec_start; ++ ++ if (strcmp(secname, ".rodata") == 0) ++ lm->mod_rodata_start = sec_start; ++ s1++; ++ } ++ } ++} ++ + /* + * Later versons of insmod store basic address information of each + * module in a format that looks like the following example of the +@@ -11907,8 +12995,11 @@ add_symbol_file(struct load_module *lm) + (!STREQ(secname, ".text") && + !STREQ(secname, ".data.percpu") && + !STREQ(secname, ".data..percpu"))) { +- sprintf(buf, " -s %s 0x%lx", secname, +- lm->mod_section_data[i].offset + lm->mod_base); ++ if (MODULE_MEMORY()) ++ sprintf(buf, " -s %s 0x%lx", secname, lm->mod_section_data[i].addr); ++ else ++ sprintf(buf, " -s %s 0x%lx", secname, ++ lm->mod_section_data[i].offset + lm->mod_base); + len += strlen(buf); + } + } +@@ -12249,24 +13340,43 @@ static struct syment * + kallsyms_module_symbol(struct load_module *lm, symbol_info *syminfo) + { + struct syment *sp, *spx; +- int cnt; ++ int cnt, t; + + if (!(lm->mod_flags & MOD_KALLSYMS)) + return NULL; + + sp = NULL; + cnt = 0; +- for (spx = lm->mod_ext_symtable; spx <= lm->mod_ext_symend; spx++) { +- if (!STREQ(spx->name, syminfo->name)) +- continue; +- if (spx->cnt) { +- cnt++; +- continue; +- } ++ if (MODULE_MEMORY()) { ++ for_each_mod_mem_type(t) { ++ if (!lm->ext_symtable[t]) ++ continue; ++ for (spx = lm->ext_symtable[t]; spx <= lm->ext_symend[t]; spx++) { ++ if (!STREQ(spx->name, syminfo->name)) ++ continue; ++ if (spx->cnt) { ++ cnt++; ++ continue; ++ } + +- spx->cnt++; +- sp = spx; +- break; ++ spx->cnt++; ++ sp = spx; ++ break; ++ } ++ } ++ } else { ++ for (spx = lm->mod_ext_symtable; spx <= lm->mod_ext_symend; spx++) { ++ if (!STREQ(spx->name, syminfo->name)) ++ continue; ++ if (spx->cnt) { ++ cnt++; ++ continue; ++ } ++ ++ spx->cnt++; ++ sp = spx; ++ break; ++ } + } + + if (CRASHDEBUG(2)) { +@@ -12292,7 +13402,7 @@ static void + store_load_module_symbols(bfd *bfd, int dynamic, void *minisyms, + long symcount, unsigned int size, ulong base_addr, char *namelist) + { +- int i; ++ int i, t; + asymbol *store; + asymbol *sym; + bfd_byte *from, *fromend; +@@ -12303,7 +13413,7 @@ store_load_module_symbols(bfd *bfd, int + char *nameptr, *secname; + long index; + long symalloc; +- int found; ++ int found = FALSE; + + if ((store = bfd_make_empty_symbol(bfd)) == NULL) + error(FATAL, "bfd_make_empty_symbol() failed\n"); +@@ -12360,8 +13470,17 @@ store_load_module_symbols(bfd *bfd, int + lm->mod_rodata_start = lm->mod_bss_start = 0; + lm->mod_load_symcnt = 0; + lm->mod_sections = 0; +- for (spx = lm->mod_ext_symtable; spx <= lm->mod_ext_symend; spx++) +- spx->cnt = 0; ++ if (MODULE_MEMORY()) { ++ for_each_mod_mem_type(t) { ++ if (!lm->ext_symtable[t]) ++ continue; ++ for (spx = lm->ext_symtable[t]; spx <= lm->ext_symend[t]; spx++) ++ spx->cnt = 0; ++ } ++ } else { ++ 
for (spx = lm->mod_ext_symtable; spx <= lm->mod_ext_symend; spx++) ++ spx->cnt = 0; ++ } + sp = lm->mod_load_symtable; + + if (!(lm->mod_section_data = (struct mod_section_data *) +@@ -12372,13 +13491,14 @@ store_load_module_symbols(bfd *bfd, int + + bfd_map_over_sections(bfd, section_header_info, MODULE_SECTIONS); + +- if (kt->flags & KMOD_V1) ++ if (MODULE_MEMORY()) ++ calculate_load_order_6_4(lm, bfd, dynamic, minisyms, symcount, size); ++ else if (kt->flags & KMOD_V1) + calculate_load_order_v1(lm, bfd); + else + calculate_load_order_v2(lm, bfd, dynamic, minisyms, + symcount, size); + +- + from = (bfd_byte *) minisyms; + fromend = from + symcount * size; + for (; from < fromend; from += size) +@@ -12481,7 +13601,10 @@ store_load_module_symbols(bfd *bfd, int + syminfo.value += lm->mod_percpu; + found = TRUE; + } else { +- syminfo.value += lm->mod_section_data[i].offset + lm->mod_base; ++ if (MODULE_MEMORY()) ++ syminfo.value += lm->mod_section_data[i].addr; ++ else ++ syminfo.value += lm->mod_section_data[i].offset + lm->mod_base; + found = TRUE; + } + } +@@ -12516,6 +13639,53 @@ store_load_module_symbols(bfd *bfd, int + * syminfo data types accepted above, plus the two pseudo symbols. + * Note that the new syment name pointers haven't been resolved yet. + */ ++ if (!MODULE_MEMORY()) ++ goto old_module; ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->ext_symtable[t]) ++ continue; ++ for (spx = lm->ext_symtable[t]; spx <= lm->ext_symend[t]; spx++) { ++ found = FALSE; ++ for (sp = lm->mod_load_symtable; sp < lm->mod_load_symend; sp++) { ++ index = (long)sp->name; ++ nameptr = &lm->mod_load_namespace.address[index]; ++ if (STREQ(spx->name, nameptr)) { ++ found = TRUE; ++ if (spx->value == sp->value) { ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: %s matches!\n", ++ lm->mod_name, nameptr); ++ } else { ++ if (CRASHDEBUG(2)) ++ fprintf(fp, ++ "[%s] %s: %lx != extern'd value: %lx\n", ++ lm->mod_name, ++ nameptr, sp->value, ++ spx->value); ++ } ++ break; ++ } ++ } ++ if (!found) { ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "append ext %s (%lx)\n", spx->name, spx->value); ++ /* append it here... */ ++ namespace_ctl(NAMESPACE_INSTALL, ++ &lm->mod_load_namespace, ++ lm->mod_load_symend, spx->name); ++ ++ lm->mod_load_symend->value = spx->value; ++ lm->mod_load_symend->type = spx->type; ++ lm->mod_load_symend->flags |= MODULE_SYMBOL; ++ lm->mod_load_symend++; ++ lm->mod_load_symcnt++; ++ } ++ } ++ } ++ goto append_section_symbols; ++ ++old_module: + for (spx = lm->mod_ext_symtable; spx <= lm->mod_ext_symend; spx++) { + found = FALSE; + for (sp = lm->mod_load_symtable; +@@ -12558,6 +13728,7 @@ store_load_module_symbols(bfd *bfd, int + } + } + ++append_section_symbols: + /* + * Append helpful pseudo symbols about found out sections. + * Use 'S' as its type which is never seen in existing symbols. 
+@@ -12567,8 +13738,11 @@ store_load_module_symbols(bfd *bfd, int + if (!(lm->mod_section_data[i].flags & SEC_FOUND)) + continue; + /* Section start */ +- lm->mod_load_symend->value = lm->mod_base + +- lm->mod_section_data[i].offset; ++ if (MODULE_MEMORY()) ++ lm->mod_load_symend->value = lm->mod_section_data[i].addr; ++ else ++ lm->mod_load_symend->value = lm->mod_base + ++ lm->mod_section_data[i].offset; + lm->mod_load_symend->type = 'S'; + lm->mod_load_symend->flags |= MODULE_SYMBOL; + sprintf(name, "_MODULE_SECTION_START [%s]", +@@ -12579,9 +13753,12 @@ store_load_module_symbols(bfd *bfd, int + lm->mod_load_symcnt++; + + /* Section end */ +- lm->mod_load_symend->value = lm->mod_base + +- lm->mod_section_data[i].offset + +- lm->mod_section_data[i].size; ++ if (MODULE_MEMORY()) ++ lm->mod_load_symend->value = lm->mod_section_data[i].addr; ++ else ++ lm->mod_load_symend->value = lm->mod_base + ++ lm->mod_section_data[i].offset; ++ lm->mod_load_symend->value += lm->mod_section_data[i].size; + lm->mod_load_symend->type = 'S'; + lm->mod_load_symend->flags |= MODULE_SYMBOL; + sprintf(name, "_MODULE_SECTION_END [%s]", +@@ -12598,16 +13775,57 @@ store_load_module_symbols(bfd *bfd, int + qsort(lm->mod_load_symtable, lm->mod_load_symcnt, sizeof(struct syment), + compare_syms); + ++ if (MODULE_MEMORY()) { ++ /* keep load symtable addresses to lm->load_symtable[] */ ++ /* TODO: make more efficient */ ++ for (sp = lm->mod_load_symtable; sp < lm->mod_load_symend; sp++) { ++ char buf1[BUFSIZE], buf2[BUFSIZE]; ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "DEBUG: value %16lx name %s\n", sp->value, sp->name); ++ ++ if (!MODULE_PSEUDO_SYMBOL(sp)) ++ continue; ++ ++ for_each_mod_mem_type(t) { ++ if (!lm->mem[t].size) ++ continue; ++ ++ sprintf(buf1, "%s%s", module_tag[t].start, lm->mod_name); ++ sprintf(buf2, "%s%s", module_tag[t].end, lm->mod_name); ++ ++ if (STREQ(sp->name, buf1)) { ++ lm->load_symtable[t] = sp; ++ break; ++ } else if (STREQ(sp->name, buf2)) { ++ lm->load_symend[t] = sp; ++ break; ++ } ++ } ++ } ++ } ++ + lm->mod_load_symend--; +- if (!MODULE_END(lm->mod_load_symend) && ++ if (!MODULE_MEMORY() && !MODULE_END(lm->mod_load_symend) && + !IN_MODULE_PERCPU(lm->mod_load_symend->value, lm)) + error(INFO, "%s: last symbol: %s is not _MODULE_END_%s?\n", + lm->mod_name, lm->mod_load_symend->name, lm->mod_name); + +- mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend); +- lm->mod_symtable = lm->mod_load_symtable; +- lm->mod_symend = lm->mod_load_symend; +- mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend); ++ if (MODULE_MEMORY()) { ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ mod_symtable_hash_remove_range(lm->symtable[t], lm->symend[t]); ++ } ++ lm->symtable = lm->load_symtable; ++ lm->symend = lm->load_symend; ++ mod_symtable_hash_install_range(lm->mod_load_symtable, lm->mod_load_symend); ++ } else { ++ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend); ++ lm->mod_symtable = lm->mod_load_symtable; ++ lm->mod_symend = lm->mod_load_symend; ++ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend); ++ } + + lm->mod_flags &= ~MOD_EXT_SYMS; + lm->mod_flags |= MOD_LOAD_SYMS; +@@ -12622,7 +13840,7 @@ store_load_module_symbols(bfd *bfd, int + void + delete_load_module(ulong base_addr) + { +- int i; ++ int i, t; + struct load_module *lm; + struct gnu_request request, *req; + +@@ -12637,7 +13855,18 @@ delete_load_module(ulong base_addr) + req->name = lm->mod_namelist; + gdb_interface(req); + } +- 
mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend); ++ if (MODULE_MEMORY()) { ++ if (lm->mod_load_symtable) { ++ mod_symtable_hash_remove_range(lm->mod_load_symtable, ++ lm->mod_load_symend); ++ for_each_mod_mem_type(t) { ++ lm->load_symtable[t] = NULL; ++ lm->load_symend[t] = NULL; ++ } ++ } ++ } else ++ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend); ++ + if (lm->mod_load_symtable) { + free(lm->mod_load_symtable); + namespace_ctl(NAMESPACE_FREE, +@@ -12645,9 +13874,23 @@ delete_load_module(ulong base_addr) + } + if (lm->mod_flags & MOD_REMOTE) + unlink_module(lm); +- lm->mod_symtable = lm->mod_ext_symtable; +- lm->mod_symend = lm->mod_ext_symend; +- mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend); ++ ++ if (MODULE_MEMORY()) { ++ if (lm->mod_load_symtable) { /* still non-NULL */ ++ lm->symtable = lm->ext_symtable; ++ lm->symend = lm->ext_symend; ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ mod_symtable_hash_install_range(lm->symtable[t], ++ lm->symend[t]); ++ } ++ } ++ } else { ++ lm->mod_symtable = lm->mod_ext_symtable; ++ lm->mod_symend = lm->mod_ext_symend; ++ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend); ++ } + lm->mod_flags &= ~(MOD_LOAD_SYMS|MOD_REMOTE|MOD_NOPATCH); + lm->mod_flags |= MOD_EXT_SYMS; + lm->mod_load_symtable = NULL; +@@ -12676,7 +13919,18 @@ delete_load_module(ulong base_addr) + req->name = lm->mod_namelist; + gdb_interface(req); + } +- mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend); ++ if (MODULE_MEMORY()) { ++ if (lm->mod_load_symtable) { ++ mod_symtable_hash_remove_range(lm->mod_load_symtable, ++ lm->mod_load_symend); ++ for_each_mod_mem_type(t) { ++ lm->load_symtable[t] = NULL; ++ lm->load_symend[t] = NULL; ++ } ++ } ++ } else ++ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend); ++ + if (lm->mod_load_symtable) { + free(lm->mod_load_symtable); + namespace_ctl(NAMESPACE_FREE, +@@ -12684,9 +13938,23 @@ delete_load_module(ulong base_addr) + } + if (lm->mod_flags & MOD_REMOTE) + unlink_module(lm); +- lm->mod_symtable = lm->mod_ext_symtable; +- lm->mod_symend = lm->mod_ext_symend; +- mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend); ++ ++ if (MODULE_MEMORY()) { ++ if (lm->mod_load_symtable) { ++ lm->symtable = lm->ext_symtable; ++ lm->symend = lm->ext_symend; ++ for_each_mod_mem_type(t) { ++ if (!lm->symtable[t]) ++ continue; ++ mod_symtable_hash_install_range(lm->symtable[t], ++ lm->symend[t]); ++ } ++ } ++ } else { ++ lm->mod_symtable = lm->mod_ext_symtable; ++ lm->mod_symend = lm->mod_ext_symend; ++ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend); ++ } + lm->mod_flags &= ~(MOD_LOAD_SYMS|MOD_REMOTE|MOD_NOPATCH); + lm->mod_flags |= MOD_EXT_SYMS; + lm->mod_load_symtable = NULL; +@@ -13435,7 +14703,7 @@ is_downsized(char *name) + struct syment * + symbol_complete_match(const char *match, struct syment *sp_last) + { +- int i; ++ int i, t; + struct syment *sp, *sp_end, *sp_start; + struct load_module *lm; + int search_init; +@@ -13455,6 +14723,34 @@ symbol_complete_match(const char *match, + sp_start = NULL; + } + ++ if (!MODULE_MEMORY()) ++ goto old_module; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ for_each_mod_mem_type(t) { ++ sp_end = lm->symend[t]; ++ if (!sp_start) ++ sp_start = lm->symtable[t]; ++ ++ if (sp_start < lm->symtable[t] || sp_start > sp_end) ++ continue; ++ ++ for (sp = sp_start; sp < sp_end; sp++) { ++ if (MODULE_PSEUDO_SYMBOL(sp)) ++ continue; ++ ++ 
if (STRNEQ(sp->name, match)) ++ return sp; ++ } ++ sp_start = NULL; ++ } ++ } ++ ++ return NULL; ++ ++old_module: + search_init = FALSE; + + for (i = 0; i < st->mods_installed; i++) { +@@ -13501,3 +14797,58 @@ symbol_complete_match(const char *match, + + return NULL; + } ++ ++/* Returns module memory type if addr is in range, otherwise MOD_INVALID(-1) */ ++static int ++in_module_range(ulong addr, struct load_module *lm, int start, int end) ++{ ++ ulong base = 0, size = 0; ++ int i; ++ ++ if (!MODULE_MEMORY()) ++ goto old_module; ++ ++ for (i = start ; i <= end; i++) { ++ base = lm->mem[i].base; ++ size = lm->mem[i].size; ++ if (!size) ++ continue; ++ if ((addr >= base) && (addr < (base + size))) ++ return i; ++ } ++ return MOD_INVALID; ++ ++old_module: ++ if (start == MOD_TEXT) { ++ base = lm->mod_base; ++ size = lm->mod_size; ++ } else if (start == MOD_INIT_TEXT) { ++ base = lm->mod_init_module_ptr; ++ size = lm->mod_init_size; ++ } else ++ error(FATAL, "invalid module memory type!"); ++ ++ if ((addr >= base) && (addr < (base + size))) ++ return start; ++ ++ return MOD_INVALID; ++} ++ ++/* Returns module memory type, otherwise MOD_INVALID(-1) */ ++static int ++module_mem_type(ulong addr, struct load_module *lm) ++{ ++ return in_module_range(addr, lm, MOD_TEXT, MOD_INIT_RODATA); ++} ++ ++/* Returns the end address of the module memory region. */ ++static ulong ++module_mem_end(ulong addr, struct load_module *lm) ++{ ++ int type = module_mem_type(addr, lm); ++ ++ if (type == MOD_INVALID) ++ return 0; ++ ++ return lm->mem[type].base + lm->mem[type].size; ++} diff -Nru crash-8.0.2/debian/patches/lp2038249-0017-Fix-failure-of-gathering-task-table-on-Linux-6.5-rc1.patch crash-8.0.2/debian/patches/lp2038249-0017-Fix-failure-of-gathering-task-table-on-Linux-6.5-rc1.patch --- crash-8.0.2/debian/patches/lp2038249-0017-Fix-failure-of-gathering-task-table-on-Linux-6.5-rc1.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0017-Fix-failure-of-gathering-task-table-on-Linux-6.5-rc1.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,105 @@ +From: Kazuhito Hagio +Date: Fri Jun 23 16:34:35 2023 +0900 +Subject: Fix failure of gathering task table on Linux 6.5-rc1 and later + +Kernel commit b69f0aeb0689 ("pid: Replace struct pid 1-element array +with flex-array") changed pid.numbers[1] to pid.numbers[]. With this, +the size of struct pid does not contain the size of struct upid: + + (gdb) ptype /o struct pid + /* offset | size */ type = struct pid { + /* 0 | 4 */ refcount_t count; + ... + /* 96 | 0 */ struct upid numbers[]; + ^^^^ ^^^ + /* total size (bytes): 96 */ + } ^^^^ + +As a result, in refresh_xarray_task_table(), crash does not read the +data of pid.numbers[0].ns and cannot gather the task table correctly. + + $ crash vmlinux vmcore + ... + WARNING: active task ffff936992ad0000 on cpu 1 not found in PID hash + ... + crash> ps -S + RU: 9 + crash> + +Increase the size of reading struct pid by SIZE(upid) in this case. 
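+
+The adjustment reduces to a single branch on the gdb-reported array
+length. A minimal, self-contained sketch of that calculation follows;
+pid_read_size and its size_t parameters are illustrative stand-ins for
+crash's SIZE(pid) and SIZE(upid) values, not code from this patch:
+
+  #include <stddef.h>
+
+  /*
+   * 6.5+: pid.numbers[] is a flexible array, so its reported length
+   * is 0 and sizeof(struct pid) excludes the first struct upid;
+   * older kernels declare numbers[1], already counted in the size.
+   */
+  size_t pid_read_size(size_t size_pid, size_t size_upid, int numbers_len)
+  {
+          return numbers_len == 0 ? size_pid + size_upid : size_pid;
+  }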
+ +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/88580068b7dd96bf679c82bdc05e146968ade10c + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -2414,6 +2414,7 @@ struct array_table { + int task_struct_rlim; + int signal_struct_rlim; + int vm_numa_stat; ++ int pid_numbers; + }; + + /* +--- crash-8.0.2.orig/symbols.c ++++ crash-8.0.2/symbols.c +@@ -9695,6 +9695,8 @@ builtin_array_length(char *s, int len, i + lenptr = &array_table.signal_struct_rlim; + else if (STREQ(s, "vm_numa_stat")) + lenptr = &array_table.vm_numa_stat; ++ else if (STREQ(s, "pid.numbers")) ++ lenptr = &array_table.pid_numbers; + + if (!lenptr) /* not stored */ + return(len); +@@ -12087,6 +12089,7 @@ dump_offset_table(char *spec, ulong make + ARRAY_LENGTH(signal_struct_rlim)); + fprintf(fp, " vm_numa_stat: %d\n", + ARRAY_LENGTH(vm_numa_stat)); ++ fprintf(fp, " pid_numbers: %d\n", ARRAY_LENGTH(pid_numbers)); + + if (spec) { + int in_size_table, in_array_table, arrays, offsets, sizes; +--- crash-8.0.2.orig/task.c ++++ crash-8.0.2/task.c +@@ -352,6 +352,7 @@ task_init(void) + MEMBER_OFFSET_INIT(upid_ns, "upid", "ns"); + MEMBER_OFFSET_INIT(upid_pid_chain, "upid", "pid_chain"); + MEMBER_OFFSET_INIT(pid_numbers, "pid", "numbers"); ++ ARRAY_LENGTH_INIT(len, pid_numbers, "pid.numbers", NULL, 0); + MEMBER_OFFSET_INIT(pid_tasks, "pid", "tasks"); + tt->init_pid_ns = symbol_value("init_pid_ns"); + } +@@ -2571,6 +2572,7 @@ refresh_xarray_task_table(void) + char *tp; + struct list_pair xp; + char *pidbuf; ++ long pid_size = SIZE(pid); + + if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ + return; +@@ -2600,8 +2602,12 @@ refresh_xarray_task_table(void) + if (CRASHDEBUG(1)) + console("xarray: count: %ld\n", count); + ++ /* 6.5: b69f0aeb0689 changed pid.numbers[1] to numbers[] */ ++ if (ARRAY_LENGTH(pid_numbers) == 0) ++ pid_size += SIZE(upid); ++ + retries = 0; +- pidbuf = GETBUF(SIZE(pid)); ++ pidbuf = GETBUF(pid_size); + + retry_xarray: + if (retries && DUMPFILE()) +@@ -2669,7 +2675,7 @@ retry_xarray: + * - get task from address of task->pids[0] + */ + if (!readmem(next, KVADDR, pidbuf, +- SIZE(pid), "pid", RETURN_ON_ERROR|QUIET)) { ++ pid_size, "pid", RETURN_ON_ERROR|QUIET)) { + error(INFO, "\ncannot read pid struct from xarray\n"); + if (DUMPFILE()) + continue; diff -Nru crash-8.0.2/debian/patches/lp2038249-0018-Fix-compilation-error-due-to-new-strlcpy-function-th.patch crash-8.0.2/debian/patches/lp2038249-0018-Fix-compilation-error-due-to-new-strlcpy-function-th.patch --- crash-8.0.2/debian/patches/lp2038249-0018-Fix-compilation-error-due-to-new-strlcpy-function-th.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0018-Fix-compilation-error-due-to-new-strlcpy-function-th.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,55 @@ +From: Lianbo Jiang +Date: Wed Jul 5 10:02:59 2023 +0800 +Subject: Fix compilation error due to new strlcpy function that glibc added + +The crash-utility has its own strlcpy(), but recently the latest glibc +has also implemented the strlcpy function, which is derived from +OpenBSD. Eventually this caused the following compilation error: + + # make -j8 lzo + ... 
+ In file included from global_data.c:18: + defs.h:5556:8: error: conflicting types for ‘strlcpy’; have ‘size_t(char *, char *, size_t)’ {aka ‘long unsigned int(char *, char *, long unsigned int)’} + 5556 | size_t strlcpy(char *, char *, size_t); + | ^~~~~~~ + In file included from memory.c:19: + defs.h:5556:8: error: conflicting types for ‘strlcpy’; have ‘size_t(char *, char *, size_t)’ {aka ‘long unsigned int(char *, char *, long unsigned int)’} + 5556 | size_t strlcpy(char *, char *, size_t); + | ^~~~~~~ + ... + +To fix the issue, let's declare the strlcpy() as a weak function and +keep the same parameter types as the glibc function has. + +Related glibc commits: +454a20c8756c ("Implement strlcpy and strlcat [BZ #178]") +d2fda60e7c40 ("manual: Manual update for strlcat, strlcpy, wcslcat, wclscpy") +388ae538ddcb ("hurd: Add strlcpy, strlcat, wcslcpy, wcslcat to libc.abilist") + +Signed-off-by: Lianbo Jiang + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/4ee56105881d7bb1da1e668ac5bb47a4e0846676 + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -5355,7 +5355,7 @@ uint32_t swap32(uint32_t, int); + uint64_t swap64(uint64_t, int); + ulong *get_cpumask_buf(void); + int make_cpumask(char *, ulong *, int, int *); +-size_t strlcpy(char *, char *, size_t); ++size_t strlcpy(char *, const char *, size_t) __attribute__ ((__weak__)); + struct rb_node *rb_first(struct rb_root *); + struct rb_node *rb_parent(struct rb_node *, struct rb_node *); + struct rb_node *rb_right(struct rb_node *, struct rb_node *); +--- crash-8.0.2.orig/tools.c ++++ crash-8.0.2/tools.c +@@ -6762,7 +6762,7 @@ make_cpumask_error: + * always be NULL-terminated. + */ + size_t +-strlcpy(char *dest, char *src, size_t size) ++strlcpy(char *dest, const char *src, size_t size) + { + size_t ret = strlen(src); + diff -Nru crash-8.0.2/debian/patches/lp2038249-0019-Fix-irq-a-option-on-Linux-6.0-and-later.patch crash-8.0.2/debian/patches/lp2038249-0019-Fix-irq-a-option-on-Linux-6.0-and-later.patch --- crash-8.0.2/debian/patches/lp2038249-0019-Fix-irq-a-option-on-Linux-6.0-and-later.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0019-Fix-irq-a-option-on-Linux-6.0-and-later.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,38 @@ +From: Kazuhito Hagio +Date: Mon Jul 10 10:42:08 2023 +0900 +Subject: Fix "irq -a" option on Linux 6.0 and later + +Kernel commit f0dd891dd5a1d ("lib/cpumask: move some one-line wrappers +to header file"), which is contained in Linux 6.0 and later kernels, +inlined alloc_cpumask_var() function. As a result, the "irq -a" option +fails to determine that cpumask_var_t is a pointer, and displays wrong +CPU affinity for IRQs: + + crash> irq -a + IRQ NAME AFFINITY + 1 i8042 3 + 4 ttyS0 + 8 rtc0 + 9 acpi 3 + 12 i8042 3 + ... + +Use alloc_cpumask_var_node() function symbol instead to fix it. 
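+
+The check remains a pure symbol-existence heuristic. A minimal sketch
+of the updated test; affinity_is_pointer is an illustrative wrapper,
+not a function in this patch, and symbol_exists() is crash's own
+lookup, declared here only to keep the fragment self-contained:
+
+  extern int symbol_exists(char *symbol);  /* crash's lookup */
+
+  /*
+   * With CONFIG_CPUMASK_OFFSTACK=y, irq_desc.affinity is a pointer
+   * that must be dereferenced before the bitmap is read; either
+   * allocator symbol implies that configuration.  Linux 6.0 inlined
+   * alloc_cpumask_var(), but the _node variant stays out of line.
+   */
+  static int affinity_is_pointer(void)
+  {
+          return symbol_exists("alloc_cpumask_var_node") ||
+                 symbol_exists("alloc_cpumask_var");
+  }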
+ +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/6d0be1316aa3666895c0a8a0d3c98c235ec03bd4 + +--- crash-8.0.2.orig/kernel.c ++++ crash-8.0.2/kernel.c +@@ -7294,7 +7294,8 @@ generic_get_irq_affinity(int irq) + tmp_addr = irq_desc_addr + \ + OFFSET(irq_desc_t_affinity); + +- if (symbol_exists("alloc_cpumask_var")) /* pointer member */ ++ if (symbol_exists("alloc_cpumask_var_node") || ++ symbol_exists("alloc_cpumask_var")) /* pointer member */ + readmem(tmp_addr,KVADDR, &affinity_ptr, sizeof(ulong), + "irq_desc affinity", FAULT_ON_ERROR); + else /* array member */ diff -Nru crash-8.0.2/debian/patches/lp2038249-0020-Exclude-zero-entries-from-do_maple_tree-return-value.patch crash-8.0.2/debian/patches/lp2038249-0020-Exclude-zero-entries-from-do_maple_tree-return-value.patch --- crash-8.0.2/debian/patches/lp2038249-0020-Exclude-zero-entries-from-do_maple_tree-return-value.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0020-Exclude-zero-entries-from-do_maple_tree-return-value.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,26 @@ +From: Kazuhito Hagio +Date: Fri, 7 Jul 2023 15:17:18 +0900 +Subject: Exclude zero entries from do_maple_tree() return value + +While the return value of do_radix_tree() and do_xarray() does not +contain NULL entries, do_maple_tree()'s one contains NULL entries. + +Make this behavior consistent with the previous tree functions to make +replacement easier, especially for the following patch. + +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/d17d51a92a3a1c1cce1e646c38fe52ca99406cf9 + +--- crash-8.0.2.orig/maple_tree.c ++++ crash-8.0.2/maple_tree.c +@@ -279,7 +279,7 @@ static void do_mt_entry(ulong entry, ulo + static struct req_entry **e = NULL; + struct tree_data *td = ops->is_td ? (struct tree_data *)ops->private : NULL; + +- if (ops->entry) ++ if (ops->entry && entry) + ops->entry(entry, entry, path, max, ops->private); + + if (!td) diff -Nru crash-8.0.2/debian/patches/lp2038249-0021-Fix-irq-a-s-options-on-Linux-6.5-rc1-and-later.patch crash-8.0.2/debian/patches/lp2038249-0021-Fix-irq-a-s-options-on-Linux-6.5-rc1-and-later.patch --- crash-8.0.2/debian/patches/lp2038249-0021-Fix-irq-a-s-options-on-Linux-6.5-rc1-and-later.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0021-Fix-irq-a-s-options-on-Linux-6.5-rc1-and-later.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,179 @@ +From: Kazuhito Hagio +Date: Wed Jul 12 17:55:29 2023 +0900 +Subject: Fix "irq [-a|-s]" options on Linux 6.5-rc1 and later + +Kernel commit 721255b982 ("genirq: Use a maple tree for interrupt +descriptor management"), which is contained in Linux 6.5-rc1 and later +kernels, replaced irq_desc_tree with a maple tree sparse_irqs. + +Without the patch, "irq [-a|-s]" options fail with an error, e.g. the +following on x86_64, on kernels configured with CONFIG_SPARSE_IRQ=y. + + crash> irq + irq: x86_64_dump_irq: irq_desc[] or irq_desc_tree do not exist? 
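+
+Because a maple-tree list_pair.index is only a counter, the fix in the
+hunks below gathers every descriptor and matches on its irq_data.irq
+member. A minimal sketch of that scan; find_irq_desc and the irq_of
+callback are illustrative, and crash's readmem()/OFFSET() plumbing
+that actually extracts the member is omitted:
+
+  struct list_pair {              /* as declared in crash's defs.h */
+          unsigned long index;
+          void *value;
+  };
+
+  /* Return the irq_desc address for irq, or 0 if it is absent.
+   * Entries are non-NULL here because do_maple_tree() now filters
+   * zero entries (see the previous patch in this series). */
+  unsigned long find_irq_desc(struct list_pair *lp, int cnt, int irq,
+                              unsigned int (*irq_of)(void *desc))
+  {
+          int c;
+
+          for (c = 0; c < cnt; c++)
+                  if (irq_of(lp[c].value) == irq)
+                          return (unsigned long)lp[c].value;
+          return 0;
+  }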
+ +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/38d35bd1423ccafd0b8be0744155ce59ef3034ff + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -670,6 +670,7 @@ struct new_utsname { + #define IRQ_DESC_TREE_XARRAY (0x80ULL) + #define KMOD_PAX (0x100ULL) + #define KMOD_MEMORY (0x200ULL) ++#define IRQ_DESC_TREE_MAPLE (0x400ULL) + + #define XEN() (kt->flags & ARCH_XEN) + #define OPENVZ() (kt->flags & ARCH_OPENVZ) +@@ -2206,6 +2207,7 @@ struct offset_table { + long module_mem; + long module_memory_base; + long module_memory_size; ++ long irq_data_irq; + }; + + struct size_table { /* stash of commonly-used sizes */ +--- crash-8.0.2.orig/ia64.c ++++ crash-8.0.2/ia64.c +@@ -791,7 +791,8 @@ ia64_back_trace_cmd(struct bt_info *bt) + static void + ia64_dump_irq(int irq) + { +- if (symbol_exists("irq_desc") || symbol_exists("_irq_desc") || ++ if (kernel_symbol_exists("sparse_irqs") || ++ symbol_exists("irq_desc") || symbol_exists("_irq_desc") || + kernel_symbol_exists("irq_desc_ptrs")) { + machdep->dump_irq = generic_dump_irq; + return(generic_dump_irq(irq)); +--- crash-8.0.2.orig/kernel.c ++++ crash-8.0.2/kernel.c +@@ -541,7 +541,10 @@ kernel_init() + MEMBER_OFFSET_INIT(irqaction_dev_id, "irqaction", "dev_id"); + MEMBER_OFFSET_INIT(irqaction_next, "irqaction", "next"); + +- if (kernel_symbol_exists("irq_desc_tree")) { ++ /* 6.5 and later: CONFIG_SPARSE_IRQ */ ++ if (kernel_symbol_exists("sparse_irqs")) ++ kt->flags2 |= IRQ_DESC_TREE_MAPLE; ++ else if (kernel_symbol_exists("irq_desc_tree")) { + get_symbol_type("irq_desc_tree", NULL, &req); + if (STREQ(req.type_tag_name, "xarray")) { + kt->flags2 |= IRQ_DESC_TREE_XARRAY; +@@ -554,6 +557,7 @@ kernel_init() + } + STRUCT_SIZE_INIT(irq_data, "irq_data"); + if (VALID_STRUCT(irq_data)) { ++ MEMBER_OFFSET_INIT(irq_data_irq, "irq_data", "irq"); + MEMBER_OFFSET_INIT(irq_data_chip, "irq_data", "chip"); + MEMBER_OFFSET_INIT(irq_data_affinity, "irq_data", "affinity"); + MEMBER_OFFSET_INIT(irq_desc_irq_data, "irq_desc", "irq_data"); +@@ -6176,6 +6180,8 @@ dump_kernel_table(int verbose) + fprintf(fp, "%sIRQ_DESC_TREE_RADIX", others++ ? "|" : ""); + if (kt->flags2 & IRQ_DESC_TREE_XARRAY) + fprintf(fp, "%sIRQ_DESC_TREE_XARRAY", others++ ? "|" : ""); ++ if (kt->flags2 & IRQ_DESC_TREE_MAPLE) ++ fprintf(fp, "%sIRQ_DESC_TREE_MAPLE", others++ ? "|" : ""); + if (kt->flags2 & KMOD_PAX) + fprintf(fp, "%sKMOD_PAX", others++ ? "|" : ""); + if (kt->flags2 & KMOD_MEMORY) +@@ -6648,6 +6654,45 @@ get_irq_desc_addr(int irq) + readmem(ptr, KVADDR, &addr, + sizeof(void *), "irq_desc_ptrs entry", + FAULT_ON_ERROR); ++ } else if (kt->flags2 & IRQ_DESC_TREE_MAPLE) { ++ unsigned int i; ++ ++ if (kt->highest_irq && (irq > kt->highest_irq)) ++ return addr; ++ ++ cnt = do_maple_tree(symbol_value("sparse_irqs"), MAPLE_TREE_COUNT, NULL); ++ ++ len = sizeof(struct list_pair) * (cnt+1); ++ lp = (struct list_pair *)GETBUF(len); ++ lp[0].index = cnt; /* maxcount */ ++ ++ cnt = do_maple_tree(symbol_value("sparse_irqs"), MAPLE_TREE_GATHER, lp); ++ ++ /* ++ * NOTE: We cannot use lp.index like Radix Tree or XArray because ++ * it's not an absolute index and just counter in Maple Tree. 
++ */ ++ if (kt->highest_irq == 0) { ++ readmem((ulong)lp[cnt-1].value + ++ OFFSET(irq_desc_irq_data) + OFFSET(irq_data_irq), ++ KVADDR, &kt->highest_irq, sizeof(int), "irq_data.irq", ++ FAULT_ON_ERROR); ++ } ++ ++ for (c = 0; c < cnt; c++) { ++ readmem((ulong)lp[c].value + ++ OFFSET(irq_desc_irq_data) + OFFSET(irq_data_irq), ++ KVADDR, &i, sizeof(int), "irq_data.irq", FAULT_ON_ERROR); ++ if (i == irq) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "index: %d value: %lx\n", ++ i, (ulong)lp[c].value); ++ addr = (ulong)lp[c].value; ++ break; ++ } ++ } ++ FREEBUF(lp); ++ + } else if (kt->flags2 & (IRQ_DESC_TREE_RADIX|IRQ_DESC_TREE_XARRAY)) { + if (kt->highest_irq && (irq > kt->highest_irq)) + return addr; +@@ -6696,8 +6741,8 @@ get_irq_desc_addr(int irq) + FREEBUF(lp); + } else { + error(FATAL, +- "neither irq_desc, _irq_desc, irq_desc_ptrs " +- "or irq_desc_tree symbols exist\n"); ++ "neither irq_desc, _irq_desc, irq_desc_ptrs, " ++ "irq_desc_tree or sparse_irqs symbols exist\n"); + } + + return addr; +--- crash-8.0.2.orig/symbols.c ++++ crash-8.0.2/symbols.c +@@ -10364,6 +10364,7 @@ dump_offset_table(char *spec, ulong make + OFFSET(irq_desc_t_kstat_irqs)); + fprintf(fp, " irq_desc_t_affinity: %ld\n", + OFFSET(irq_desc_t_affinity)); ++ fprintf(fp, " irq_data_irq: %ld\n", OFFSET(irq_data_irq)); + fprintf(fp, " irq_data_chip: %ld\n", + OFFSET(irq_data_chip)); + fprintf(fp, " irq_data_affinity: %ld\n", +--- crash-8.0.2.orig/x86_64.c ++++ crash-8.0.2/x86_64.c +@@ -5369,7 +5369,8 @@ get_x86_64_frame(struct bt_info *bt, ulo + static void + x86_64_dump_irq(int irq) + { +- if (symbol_exists("irq_desc") || ++ if (kernel_symbol_exists("sparse_irqs") || ++ symbol_exists("irq_desc") || + kernel_symbol_exists("irq_desc_ptrs") || + kernel_symbol_exists("irq_desc_tree")) { + machdep->dump_irq = generic_dump_irq; +@@ -5383,7 +5384,8 @@ x86_64_dump_irq(int irq) + static void + x86_64_get_irq_affinity(int irq) + { +- if (symbol_exists("irq_desc") || ++ if (kernel_symbol_exists("sparse_irqs") || ++ symbol_exists("irq_desc") || + kernel_symbol_exists("irq_desc_ptrs") || + kernel_symbol_exists("irq_desc_tree")) { + machdep->get_irq_affinity = generic_get_irq_affinity; +@@ -5397,7 +5399,8 @@ x86_64_get_irq_affinity(int irq) + static void + x86_64_show_interrupts(int irq, ulong *cpus) + { +- if (symbol_exists("irq_desc") || ++ if (kernel_symbol_exists("sparse_irqs") || ++ symbol_exists("irq_desc") || + kernel_symbol_exists("irq_desc_ptrs") || + kernel_symbol_exists("irq_desc_tree")) { + machdep->show_interrupts = generic_show_interrupts; diff -Nru crash-8.0.2/debian/patches/lp2038249-0022-arm64-Fix-vtop-command-to-display-swap-information-o.patch crash-8.0.2/debian/patches/lp2038249-0022-arm64-Fix-vtop-command-to-display-swap-information-o.patch --- crash-8.0.2/debian/patches/lp2038249-0022-arm64-Fix-vtop-command-to-display-swap-information-o.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0022-arm64-Fix-vtop-command-to-display-swap-information-o.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,66 @@ +From: chenguanyou +Date: Thu Sep 14 15:35:50 2023 +0800 +Subject: arm64: Fix "vtop" command to display swap information on Linux 5.19 and later + +Kernel commit 570ef363509b ("arm64/pgtable: support +__HAVE_ARCH_PTE_SWP_EXCLUSIVE"), which is contained in Linux 5.19 and +later kernels, changed the format of swap entries on arm64. Without the +patch, the "vtop" command cannot display swap information. 
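+
+With the constants this patch installs for 5.19 and later (type shift 3,
+5 type bits, 50 offset bits), decoding a swap PTE reduces to roughly:
+
+  swp_type   = (pte >> 3) & ((1UL << 5) - 1);
+  swp_offset = (pte >> (3 + 5)) & ((1UL << 50) - 1);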
+ +Before: + crash> vtop 70504000 + VIRTUAL PHYSICAL + 70504000 (not mapped) + + PAGE DIRECTORY: ffffff80f265c000 + PGD: ffffff80f265c008 => 800000141537003 + PMD: ffffff8101537c10 => 800000141538003 + PTE: ffffff8101538820 => 12bc3e04 + + PTE vtop: cannot determine swap location + +After: + crash> vtop 70504000 + VIRTUAL PHYSICAL + 70504000 (not mapped) + + PAGE DIRECTORY: ffffff80f265c000 + PGD: ffffff80f265c008 => 800000141537003 + PMD: ffffff8101537c10 => 800000141538003 + PTE: ffffff8101538820 => 12bc3e04 + + PTE SWAP OFFSET + 12bc3e04 /first_stage_ramdisk/dev/block/zram0 1227838 + + VMA START END FLAGS FILE + ffffff80dfe7b578 70504000 707bd000 100073 + + SWAP: /first_stage_ramdisk/dev/block/zram0 OFFSET: 1227838 + +Signed-off-by: chenguanyou +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/c9a732d0f6abe8c63f19fee5233544633dfd309f + +--- crash-8.0.2.orig/arm64.c ++++ crash-8.0.2/arm64.c +@@ -468,8 +468,16 @@ arm64_init(int when) + } + } + +- +- if (THIS_KERNEL_VERSION >= LINUX(4,0,0)) { ++ if (THIS_KERNEL_VERSION >= LINUX(5,19,0)) { ++ ms->__SWP_TYPE_BITS = 5; ++ ms->__SWP_TYPE_SHIFT = 3; ++ ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ++ ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ++ ms->__SWP_OFFSET_BITS = 50; ++ ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1); ++ ms->PTE_PROT_NONE = (1UL << 58); ++ ms->PTE_FILE = 0; /* unused */ ++ } else if (THIS_KERNEL_VERSION >= LINUX(4,0,0)) { + ms->__SWP_TYPE_BITS = 6; + ms->__SWP_TYPE_SHIFT = 2; + ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); diff -Nru crash-8.0.2/debian/patches/lp2038249-0023-Fix-rd-command-to-display-data-on-zram-on-Linux-5.17.patch crash-8.0.2/debian/patches/lp2038249-0023-Fix-rd-command-to-display-data-on-zram-on-Linux-5.17.patch --- crash-8.0.2/debian/patches/lp2038249-0023-Fix-rd-command-to-display-data-on-zram-on-Linux-5.17.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0023-Fix-rd-command-to-display-data-on-zram-on-Linux-5.17.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,195 @@ +From: chenguanyou +Date: Mon Sep 11 20:59:39 2023 +0800 +Subject: Fix "rd" command to display data on zram on Linux 5.17 and later + +Fix "rd" command to display data on zram on Linux 5.17 and later kernels +that contain commits + a41ec880aa7b ("zsmalloc: move huge compressed obj from page to zspage"), + ffedd09fa9b0 ("zsmalloc: Stop using slab fields in struct page"), +and on Linux 6.1 and later that contain commit + f635725c3905 ("zram: do not waste zram_table_entry flags bits"). + +Also, fix a bug that sets the same "byte" by memset() instead to pages +containing the same "unsigned long" elements. + +Before: + crash> mod -s zram zram.ko + MODULE NAME BASE SIZE OBJECT FILE + ffffffde224db800 zram ffffffde224d2000 57344 zram.ko + crash> mod -s zsmalloc zsmalloc.ko + MODULE NAME BASE SIZE OBJECT FILE + ffffffde224c5180 zsmalloc ffffffde224bf000 40960 zsmalloc.ko + crash> rd 0x13d89fb0 + rd: zspage magic incorrect: b0 + +After: + crash> rd 0x13d89fb0 + 13d89fb0: c2b54f7170883b20 ;.pqO.. 
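+
+One central change is where zram keeps its flag bits: the shift is now
+read from the ZRAM_LOCK enumerator when the debuginfo provides it, with
+fallbacks of PAGE_SHIFT + 1 on 6.1 and later and the old fixed 24 before
+that. Condensed from the zram_init() hunk below:
+
+  ZRAM_FLAG_SHIFT    = 1 << zram_flag_shift;
+  ZRAM_FLAG_SAME_BIT = 1 << (zram_flag_shift + 1);
+  size = flags & (ZRAM_FLAG_SHIFT - 1);    /* low bits hold object size */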
+ +Signed-off-by: chenguanyou +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: upstream, https://github.com/crash-utility/crash/commit/0172e35083b545fa7dd640fa5de0111f8474fc14 + +--- crash-8.0.2.orig/defs.h ++++ crash-8.0.2/defs.h +@@ -2208,6 +2208,7 @@ struct offset_table { + long module_memory_base; + long module_memory_size; + long irq_data_irq; ++ long zspage_huge; + }; + + struct size_table { /* stash of commonly-used sizes */ +@@ -6915,8 +6916,7 @@ ulong try_zram_decompress(ulonglong pte_ + #define SECTOR_SHIFT 9 + #define SECTORS_PER_PAGE_SHIFT (PAGESHIFT() - SECTOR_SHIFT) + #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) +-#define ZRAM_FLAG_SHIFT (1<<24) +-#define ZRAM_FLAG_SAME_BIT (1<<25) ++ + struct zspage { + struct { + unsigned int fullness : 2; +@@ -6928,6 +6928,18 @@ struct zspage { + unsigned int freeobj; + }; + ++struct zspage_5_17 { ++ struct { ++ unsigned int huge : 1; ++ unsigned int fullness : 2; ++ unsigned int class : 9; ++ unsigned int isolated : 3; ++ unsigned int magic : 8; ++ }; ++ unsigned int inuse; ++ unsigned int freeobj; ++}; ++ + /* + * makedumpfile.c + */ +--- crash-8.0.2.orig/diskdump.c ++++ crash-8.0.2/diskdump.c +@@ -2672,15 +2672,36 @@ diskdump_device_dump_info(FILE *ofp) + } + } + ++static ulong ZRAM_FLAG_SHIFT; ++static ulong ZRAM_FLAG_SAME_BIT; ++ + static void + zram_init(void) + { ++ long zram_flag_shift; ++ + MEMBER_OFFSET_INIT(zram_mempoll, "zram", "mem_pool"); + MEMBER_OFFSET_INIT(zram_compressor, "zram", "compressor"); + MEMBER_OFFSET_INIT(zram_table_flag, "zram_table_entry", "flags"); + if (INVALID_MEMBER(zram_table_flag)) + MEMBER_OFFSET_INIT(zram_table_flag, "zram_table_entry", "value"); + STRUCT_SIZE_INIT(zram_table_entry, "zram_table_entry"); ++ MEMBER_OFFSET_INIT(zspoll_size_class, "zs_pool", "size_class"); ++ MEMBER_OFFSET_INIT(size_class_size, "size_class", "size"); ++ MEMBER_OFFSET_INIT(zspage_huge, "zspage", "huge"); ++ ++ if (enumerator_value("ZRAM_LOCK", &zram_flag_shift)) ++ ; ++ else if (THIS_KERNEL_VERSION >= LINUX(6,1,0)) ++ zram_flag_shift = PAGESHIFT() + 1; ++ else ++ zram_flag_shift = 24; ++ ++ ZRAM_FLAG_SHIFT = 1 << zram_flag_shift; ++ ZRAM_FLAG_SAME_BIT = 1 << (zram_flag_shift+1); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "zram_flag_shift: %ld\n", zram_flag_shift); + } + + static unsigned char * +@@ -2688,9 +2709,11 @@ zram_object_addr(ulong pool, ulong handl + { + ulong obj, off, class, page, zspage; + struct zspage zspage_s; ++ struct zspage_5_17 zspage_5_17_s; + physaddr_t paddr; + unsigned int obj_idx, class_idx, size; + ulong pages[2], sizes[2]; ++ ulong zs_magic; + + readmem(handle, KVADDR, &obj, sizeof(void *), "zram entry", FAULT_ON_ERROR); + obj >>= OBJ_TAG_BITS; +@@ -2699,11 +2722,20 @@ zram_object_addr(ulong pool, ulong handl + + readmem(page + OFFSET(page_private), KVADDR, &zspage, + sizeof(void *), "page_private", FAULT_ON_ERROR); +- readmem(zspage, KVADDR, &zspage_s, sizeof(struct zspage), "zspage", FAULT_ON_ERROR); + +- class_idx = zspage_s.class; +- if (zspage_s.magic != ZSPAGE_MAGIC) +- error(FATAL, "zspage magic incorrect: %x\n", zspage_s.magic); ++ if (VALID_MEMBER(zspage_huge)) { ++ readmem(zspage, KVADDR, &zspage_5_17_s, ++ sizeof(struct zspage_5_17), "zspage_5_17", FAULT_ON_ERROR); ++ class_idx = zspage_5_17_s.class; ++ zs_magic = zspage_5_17_s.magic; ++ } else { ++ readmem(zspage, KVADDR, &zspage_s, sizeof(struct zspage), "zspage", FAULT_ON_ERROR); ++ class_idx = zspage_s.class; ++ zs_magic = zspage_s.magic; ++ } ++ ++ if (zs_magic != ZSPAGE_MAGIC) ++ 
error(FATAL, "zspage magic incorrect: %x\n", zs_magic); + + class = pool + OFFSET(zspoll_size_class); + class += (class_idx * sizeof(void *)); +@@ -2721,8 +2753,13 @@ zram_object_addr(ulong pool, ulong handl + } + + pages[0] = page; +- readmem(page + OFFSET(page_freelist), KVADDR, &pages[1], ++ if (VALID_MEMBER(page_freelist)) ++ readmem(page + OFFSET(page_freelist), KVADDR, &pages[1], + sizeof(void *), "page_freelist", FAULT_ON_ERROR); ++ else ++ readmem(page + OFFSET(page_index), KVADDR, &pages[1], ++ sizeof(void *), "page_index", FAULT_ON_ERROR); ++ + sizes[0] = PAGESIZE() - off; + sizes[1] = size - sizes[0]; + if (!is_page_ptr(pages[0], &paddr)) { +@@ -2739,9 +2776,13 @@ zram_object_addr(ulong pool, ulong handl + readmem(paddr, PHYSADDR, zram_buf + sizes[0], sizes[1], "zram buffer[1]", FAULT_ON_ERROR); + + out: +- readmem(page, KVADDR, &obj, sizeof(void *), "page flags", FAULT_ON_ERROR); +- if (!(obj & (1<<10))) { //PG_OwnerPriv1 flag +- return (zram_buf + ZS_HANDLE_SIZE); ++ if (VALID_MEMBER(zspage_huge)) { ++ if (!zspage_5_17_s.huge) ++ return (zram_buf + ZS_HANDLE_SIZE); ++ } else { ++ readmem(page, KVADDR, &obj, sizeof(void *), "page flags", FAULT_ON_ERROR); ++ if (!(obj & (1<<10))) // PG_OwnerPriv1 flag ++ return (zram_buf + ZS_HANDLE_SIZE); + } + + return zram_buf; +@@ -2920,7 +2961,12 @@ try_zram_decompress(ulonglong pte_val, u + readmem(zram_table_entry + OFFSET(zram_table_flag), KVADDR, &flags, + sizeof(void *), "zram_table_flag", FAULT_ON_ERROR); + if (!entry || (flags & ZRAM_FLAG_SAME_BIT)) { +- memset(buf, entry, len); ++ ulong *same_buf = (ulong *)GETBUF(PAGESIZE()); ++ for (int count = 0; count < PAGESIZE() / sizeof(ulong); count++) { ++ same_buf[count] = entry; ++ } ++ memcpy(buf, same_buf + off, len); ++ FREEBUF(same_buf); + goto out; + } + size = flags & (ZRAM_FLAG_SHIFT -1); diff -Nru crash-8.0.2/debian/patches/lp2038249-0024-Fix-compilation-error-and-warning-with-gcc-4.8.5.patch crash-8.0.2/debian/patches/lp2038249-0024-Fix-compilation-error-and-warning-with-gcc-4.8.5.patch --- crash-8.0.2/debian/patches/lp2038249-0024-Fix-compilation-error-and-warning-with-gcc-4.8.5.patch 1970-01-01 00:00:00.000000000 +0000 +++ crash-8.0.2/debian/patches/lp2038249-0024-Fix-compilation-error-and-warning-with-gcc-4.8.5.patch 2024-01-04 06:47:25.000000000 +0000 @@ -0,0 +1,3865 @@ +From: Kazuhito Hagio +Date: Thu Oct 26 12:02:35 2023 +0900 +Subject: Fix compilation error and warning with gcc-4.8.5 + +Fix the following compilation error in diskdump.c and warning in +xen_hyper.c with gcc-4.8.5 (e.g. RHEL7). 
+ +- diskdump.c: In function 'try_zram_decompress': + diskdump.c:3048:3: error: 'for' loop initial declarations are only allowed in C99 mode + for (int count = 0; count < PAGESIZE() / sizeof(ulong); count++) { + ^ + diskdump.c:3048:3: note: use option -std=c99 or -std=gnu99 to compile your code + make[4]: *** [diskdump.o] Error 1 + make[3]: *** [gdb] Error 2 + make[2]: *** [rebuild] Error 2 + make[1]: *** [gdb_merge] Error 2 + make: *** [all] Error 2 + +- xen_hyper.c: In function 'xen_hyper_x86_pcpu_init': + xen_hyper.c:387:36: warning: 'init_tss' may be used uninitialized in this function [-Wmaybe-uninitialized] + xen_hyper_store_pcpu_context_tss(pcc, init_tss, buf); + ^ + +Fixes: 0172e35083b5 ("Fix "rd" command to display data on zram on Linux 5.17 and later") +Fixes: 9ee564cd1a46 ("xen: get stack address via stack_base array if available") +Signed-off-by: Kazuhito Hagio + +Bug-Ubuntu: https://launchpad.net/bugs/2038249 +Origin: backport, https://github.com/crash-utility/crash/commit/55a43bcefa20161c7e56ed0e309e90e941f47efc +[chengen - initialize init_tss to zero in xen_hyper.c] + +--- crash-8.0.2.orig/diskdump.c ++++ crash-8.0.2/diskdump.c +@@ -2961,8 +2961,9 @@ try_zram_decompress(ulonglong pte_val, u + readmem(zram_table_entry + OFFSET(zram_table_flag), KVADDR, &flags, + sizeof(void *), "zram_table_flag", FAULT_ON_ERROR); + if (!entry || (flags & ZRAM_FLAG_SAME_BIT)) { ++ int count; + ulong *same_buf = (ulong *)GETBUF(PAGESIZE()); +- for (int count = 0; count < PAGESIZE() / sizeof(ulong); count++) { ++ for (count = 0; count < PAGESIZE() / sizeof(ulong); count++) { + same_buf[count] = entry; + } + memcpy(buf, same_buf + off, len); +--- crash-8.0.2.orig/xen_hyper.c ++++ crash-8.0.2/xen_hyper.c +@@ -34,368 +34,394 @@ static void xen_hyper_schedule_init(void + /* + * Do initialization for Xen Hyper system here. + */ +-void +-xen_hyper_init(void) +-{ +- char *buf; ++void xen_hyper_init(void) { ++ char *buf; + #if defined(X86) || defined(X86_64) +- long member_offset; ++ long member_offset; + #endif + + #ifdef X86_64 +- xht->xen_virt_start = symbol_value("start"); ++ xht->xen_virt_start = symbol_value("start"); + +- /* +- * Xen virtual mapping is aligned to 1 GiB boundary. +- * Image starts no more than 1 GiB below +- * beginning of virtual address space. +- */ +- xht->xen_virt_start &= 0xffffffffc0000000; ++ /* ++ * Xen virtual mapping is aligned to 1 GiB boundary. ++ * Image starts no more than 1 GiB below ++ * beginning of virtual address space. ++ */ ++ xht->xen_virt_start &= 0xffffffffc0000000; + #endif + +- if (machine_type("X86_64") && +- symbol_exists("xen_phys_start") && !xen_phys_start()) +- error(WARNING, +- "This hypervisor is relocatable; if initialization fails below, try\n" +- " using the \"--xen_phys_start
\" command line option.\n\n"); +- +- if (symbol_exists("crashing_cpu")) { +- get_symbol_data("crashing_cpu", sizeof(xht->crashing_cpu), +- &xht->crashing_cpu); +- } else { +- xht->crashing_cpu = XEN_HYPER_PCPU_ID_INVALID; +- } +- machdep->get_smp_cpus(); +- machdep->memory_size(); +- +- if (symbol_exists("__per_cpu_offset")) { +- xht->flags |= XEN_HYPER_SMP; +- if((xht->__per_cpu_offset = malloc(sizeof(ulong) * XEN_HYPER_MAX_CPUS())) == NULL) { +- error(FATAL, "cannot malloc __per_cpu_offset space.\n"); +- } +- if (!readmem(symbol_value("__per_cpu_offset"), KVADDR, +- xht->__per_cpu_offset, sizeof(ulong) * XEN_HYPER_MAX_CPUS(), +- "__per_cpu_offset", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read __per_cpu_offset.\n"); +- } +- } ++ if (machine_type("X86_64") && symbol_exists("xen_phys_start") && ++ !xen_phys_start()) ++ error(WARNING, ++ "This hypervisor is relocatable; if initialization fails below, try\n" ++ " using the \"--xen_phys_start
\" command line " ++ "option.\n\n"); ++ ++ if (symbol_exists("crashing_cpu")) { ++ get_symbol_data("crashing_cpu", sizeof(xht->crashing_cpu), ++ &xht->crashing_cpu); ++ } else { ++ xht->crashing_cpu = XEN_HYPER_PCPU_ID_INVALID; ++ } ++ machdep->get_smp_cpus(); ++ machdep->memory_size(); ++ ++ if (symbol_exists("__per_cpu_offset")) { ++ xht->flags |= XEN_HYPER_SMP; ++ if ((xht->__per_cpu_offset = ++ malloc(sizeof(ulong) * XEN_HYPER_MAX_CPUS())) == NULL) { ++ error(FATAL, "cannot malloc __per_cpu_offset space.\n"); ++ } ++ if (!readmem(symbol_value("__per_cpu_offset"), KVADDR, ++ xht->__per_cpu_offset, sizeof(ulong) * XEN_HYPER_MAX_CPUS(), ++ "__per_cpu_offset", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read __per_cpu_offset.\n"); ++ } ++ } + + #if defined(X86) || defined(X86_64) +- if (symbol_exists("__per_cpu_shift")) { +- xht->percpu_shift = (int)symbol_value("__per_cpu_shift"); +- } else if (xen_major_version() >= 3 && xen_minor_version() >= 3) { +- xht->percpu_shift = 13; +- } else { +- xht->percpu_shift = 12; +- } +- member_offset = MEMBER_OFFSET("cpuinfo_x86", "x86_model_id"); +- buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_x86)); +- if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) { +- xen_hyper_x86_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf); +- } else { +- xen_hyper_x86_fill_cpu_data(xht->cpu_idxs[0], buf); +- } +- strncpy(xht->utsname.machine, (char *)(buf + member_offset), +- sizeof(xht->utsname.machine)-1); +- FREEBUF(buf); ++ if (symbol_exists("__per_cpu_shift")) { ++ xht->percpu_shift = (int)symbol_value("__per_cpu_shift"); ++ } else if (xen_major_version() >= 3 && xen_minor_version() >= 3) { ++ xht->percpu_shift = 13; ++ } else { ++ xht->percpu_shift = 12; ++ } ++ member_offset = MEMBER_OFFSET("cpuinfo_x86", "x86_model_id"); ++ buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_x86)); ++ if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) { ++ xen_hyper_x86_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf); ++ } else { ++ xen_hyper_x86_fill_cpu_data(xht->cpu_idxs[0], buf); ++ } ++ strncpy(xht->utsname.machine, (char *)(buf + member_offset), ++ sizeof(xht->utsname.machine) - 1); ++ FREEBUF(buf); + #elif defined(IA64) +- buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_ia64)); +- if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) { +- xen_hyper_ia64_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf); +- } else { +- xen_hyper_ia64_fill_cpu_data(xht->cpu_idxs[0], buf); +- } +- strncpy(xht->utsname.machine, (char *)(buf + XEN_HYPER_OFFSET(cpuinfo_ia64_vendor)), +- sizeof(xht->utsname.machine)-1); +- FREEBUF(buf); ++ buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_ia64)); ++ if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) { ++ xen_hyper_ia64_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf); ++ } else { ++ xen_hyper_ia64_fill_cpu_data(xht->cpu_idxs[0], buf); ++ } ++ strncpy(xht->utsname.machine, ++ (char *)(buf + XEN_HYPER_OFFSET(cpuinfo_ia64_vendor)), ++ sizeof(xht->utsname.machine) - 1); ++ FREEBUF(buf); + #endif + + #ifndef IA64 +- XEN_HYPER_STRUCT_SIZE_INIT(note_buf_t, "note_buf_t"); +- XEN_HYPER_STRUCT_SIZE_INIT(crash_note_t, "crash_note_t"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_core, "crash_note_t", "core"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen, "crash_note_t", "xen"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_regs, "crash_note_t", "xen_regs"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_info, "crash_note_t", "xen_info"); +- +- XEN_HYPER_STRUCT_SIZE_INIT(crash_note_core_t, "crash_note_core_t"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_note, "crash_note_core_t", "note"); +- 
XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_desc, "crash_note_core_t", "desc"); +- +- XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_t, "crash_note_xen_t"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_note, "crash_note_xen_t", "note"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_desc, "crash_note_xen_t", "desc"); +- XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_core_t, "crash_note_xen_core_t"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_note, "crash_note_xen_core_t", "note"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_desc, "crash_note_xen_core_t", "desc"); +- XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_info_t, "crash_note_xen_info_t"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_note, "crash_note_xen_info_t", "note"); +- XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_desc, "crash_note_xen_info_t", "desc"); +- XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_core_t, "crash_xen_core_t"); +- XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_info_t, "crash_xen_info_t"); +- XEN_HYPER_STRUCT_SIZE_INIT(xen_crash_xen_regs_t, "xen_crash_xen_regs_t"); +- +- XEN_HYPER_STRUCT_SIZE_INIT(ELF_Prstatus,"ELF_Prstatus"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_info, "ELF_Prstatus", "pr_info"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cursig, "ELF_Prstatus", "pr_cursig"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sigpend, "ELF_Prstatus", "pr_sigpend"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sighold, "ELF_Prstatus", "pr_sighold"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pid, "ELF_Prstatus", "pr_pid"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_ppid, "ELF_Prstatus", "pr_ppid"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pgrp, "ELF_Prstatus", "pr_pgrp"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sid, "ELF_Prstatus", "pr_sid"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_utime, "ELF_Prstatus", "pr_utime"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_stime, "ELF_Prstatus", "pr_stime"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cutime, "ELF_Prstatus", "pr_cutime"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cstime, "ELF_Prstatus", "pr_cstime"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_reg, "ELF_Prstatus", "pr_reg"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_fpvalid, "ELF_Prstatus", "pr_fpvalid"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_sec, "ELF_Timeval", "tv_sec"); +- XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_usec, "ELF_Timeval", "tv_usec"); +- XEN_HYPER_STRUCT_SIZE_INIT(ELF_Signifo,"ELF_Signifo"); +- XEN_HYPER_STRUCT_SIZE_INIT(ELF_Gregset,"ELF_Gregset"); +- XEN_HYPER_STRUCT_SIZE_INIT(ELF_Timeval,"ELF_Timeval"); ++ XEN_HYPER_STRUCT_SIZE_INIT(note_buf_t, "note_buf_t"); ++ XEN_HYPER_STRUCT_SIZE_INIT(crash_note_t, "crash_note_t"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_core, "crash_note_t", "core"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen, "crash_note_t", "xen"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_regs, "crash_note_t", ++ "xen_regs"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_info, "crash_note_t", ++ "xen_info"); ++ ++ XEN_HYPER_STRUCT_SIZE_INIT(crash_note_core_t, "crash_note_core_t"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_note, "crash_note_core_t", ++ "note"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_desc, "crash_note_core_t", ++ "desc"); ++ ++ XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_t, "crash_note_xen_t"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_note, "crash_note_xen_t", ++ "note"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_desc, 
"crash_note_xen_t", ++ "desc"); ++ XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_core_t, "crash_note_xen_core_t"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_note, ++ "crash_note_xen_core_t", "note"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_desc, ++ "crash_note_xen_core_t", "desc"); ++ XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_info_t, "crash_note_xen_info_t"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_note, ++ "crash_note_xen_info_t", "note"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_desc, ++ "crash_note_xen_info_t", "desc"); ++ XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_core_t, "crash_xen_core_t"); ++ XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_info_t, "crash_xen_info_t"); ++ XEN_HYPER_STRUCT_SIZE_INIT(xen_crash_xen_regs_t, "xen_crash_xen_regs_t"); ++ ++ XEN_HYPER_STRUCT_SIZE_INIT(ELF_Prstatus, "ELF_Prstatus"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_info, "ELF_Prstatus", "pr_info"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cursig, "ELF_Prstatus", ++ "pr_cursig"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sigpend, "ELF_Prstatus", ++ "pr_sigpend"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sighold, "ELF_Prstatus", ++ "pr_sighold"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pid, "ELF_Prstatus", "pr_pid"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_ppid, "ELF_Prstatus", "pr_ppid"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pgrp, "ELF_Prstatus", "pr_pgrp"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sid, "ELF_Prstatus", "pr_sid"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_utime, "ELF_Prstatus", ++ "pr_utime"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_stime, "ELF_Prstatus", ++ "pr_stime"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cutime, "ELF_Prstatus", ++ "pr_cutime"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cstime, "ELF_Prstatus", ++ "pr_cstime"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_reg, "ELF_Prstatus", "pr_reg"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_fpvalid, "ELF_Prstatus", ++ "pr_fpvalid"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_sec, "ELF_Timeval", "tv_sec"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_usec, "ELF_Timeval", "tv_usec"); ++ XEN_HYPER_STRUCT_SIZE_INIT(ELF_Signifo, "ELF_Signifo"); ++ XEN_HYPER_STRUCT_SIZE_INIT(ELF_Gregset, "ELF_Gregset"); ++ XEN_HYPER_STRUCT_SIZE_INIT(ELF_Timeval, "ELF_Timeval"); + #endif +- XEN_HYPER_STRUCT_SIZE_INIT(domain, "domain"); +- XEN_HYPER_STRUCT_SIZE_INIT(vcpu, "vcpu"); ++ XEN_HYPER_STRUCT_SIZE_INIT(domain, "domain"); ++ XEN_HYPER_STRUCT_SIZE_INIT(vcpu, "vcpu"); + #ifndef IA64 +- XEN_HYPER_STRUCT_SIZE_INIT(cpu_info, "cpu_info"); ++ XEN_HYPER_STRUCT_SIZE_INIT(cpu_info, "cpu_info"); + #endif +- XEN_HYPER_STRUCT_SIZE_INIT(cpu_user_regs, "cpu_user_regs"); ++ XEN_HYPER_STRUCT_SIZE_INIT(cpu_user_regs, "cpu_user_regs"); + +- xht->idle_vcpu_size = get_array_length("idle_vcpu", NULL, 0); +- xht->idle_vcpu_array = (ulong *)malloc(xht->idle_vcpu_size * sizeof(ulong)); +- if (xht->idle_vcpu_array == NULL) { +- error(FATAL, "cannot malloc idle_vcpu_array space.\n"); +- } +- if (!readmem(symbol_value("idle_vcpu"), KVADDR, xht->idle_vcpu_array, +- xht->idle_vcpu_size * sizeof(ulong), "idle_vcpu_array", +- RETURN_ON_ERROR)) { +- error(FATAL, "cannot read idle_vcpu array.\n"); +- } +- +- /* +- * Do some initialization. 
+- */ ++ xht->idle_vcpu_size = get_array_length("idle_vcpu", NULL, 0); ++ xht->idle_vcpu_array = (ulong *)malloc(xht->idle_vcpu_size * sizeof(ulong)); ++ if (xht->idle_vcpu_array == NULL) { ++ error(FATAL, "cannot malloc idle_vcpu_array space.\n"); ++ } ++ if (!readmem(symbol_value("idle_vcpu"), KVADDR, xht->idle_vcpu_array, ++ xht->idle_vcpu_size * sizeof(ulong), "idle_vcpu_array", ++ RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read idle_vcpu array.\n"); ++ } ++ ++ /* ++ * Do some initialization. ++ */ + #ifndef IA64 +- xen_hyper_dumpinfo_init(); ++ xen_hyper_dumpinfo_init(); + #endif +- xhmachdep->pcpu_init(); +- xen_hyper_domain_init(); +- xen_hyper_vcpu_init(); +- xen_hyper_misc_init(); +- /* +- * xen_hyper_post_init() have to be called after all initialize +- * functions finished. +- */ +- xen_hyper_post_init(); ++ xhmachdep->pcpu_init(); ++ xen_hyper_domain_init(); ++ xen_hyper_vcpu_init(); ++ xen_hyper_misc_init(); ++ /* ++ * xen_hyper_post_init() have to be called after all initialize ++ * functions finished. ++ */ ++ xen_hyper_post_init(); + } + + /* + * Do initialization for Domain of Xen Hyper system here. + */ +-void +-xen_hyper_domain_init(void) +-{ +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_id, "domain", "domain_id"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_tot_pages, "domain", "tot_pages"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_pages, "domain", "max_pages"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_xenheap_pages, "domain", "xenheap_pages"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_shared_info, "domain", "shared_info"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_sched_priv, "domain", "sched_priv"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_next_in_list, "domain", "next_in_list"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_flags, "domain", "domain_flags"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_evtchn, "domain", "evtchn"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_hvm, "domain", "is_hvm"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_guest_type, "domain", "guest_type"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_privileged, "domain", "is_privileged"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_debugger_attached, "domain", "debugger_attached"); +- +- /* +- * Will be removed in Xen 4.4 (hg ae9b223a675d), +- * need to check that with XEN_HYPER_VALID_MEMBER() before using +- */ +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_polling, "domain", "is_polling"); +- +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_dying, "domain", "is_dying"); +- /* +- * With Xen 4.2.5 is_paused_by_controller changed to +- * controller_pause_count. 
+- */ +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_paused_by_controller, "domain", "is_paused_by_controller"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_controller_pause_count, "domain", "controller_pause_count"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shutting_down, "domain", "is_shutting_down"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shut_down, "domain", "is_shut_down"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_vcpu, "domain", "vcpu"); +- XEN_HYPER_MEMBER_SIZE_INIT(domain_vcpu, "domain", "vcpu"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_vcpus, "domain", "max_vcpus"); +- XEN_HYPER_MEMBER_OFFSET_INIT(domain_arch, "domain", "arch"); +- +- XEN_HYPER_STRUCT_SIZE_INIT(arch_shared_info, "arch_shared_info"); +- XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_max_pfn, "arch_shared_info", "max_pfn"); +- XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_pfn_to_mfn_frame_list_list, "arch_shared_info", "pfn_to_mfn_frame_list_list"); +- XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_nmi_reason, "arch_shared_info", "nmi_reason"); +- +- XEN_HYPER_STRUCT_SIZE_INIT(shared_info, "shared_info"); +- XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_vcpu_info, "shared_info", "vcpu_info"); +- XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_pending, "shared_info", "evtchn_pending"); +- XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_mask, "shared_info", "evtchn_mask"); +- XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_arch, "shared_info", "arch"); ++void xen_hyper_domain_init(void) { ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_id, "domain", "domain_id"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_tot_pages, "domain", "tot_pages"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_pages, "domain", "max_pages"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_xenheap_pages, "domain", "xenheap_pages"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_shared_info, "domain", "shared_info"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_sched_priv, "domain", "sched_priv"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_next_in_list, "domain", "next_in_list"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_flags, "domain", "domain_flags"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_evtchn, "domain", "evtchn"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_hvm, "domain", "is_hvm"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_guest_type, "domain", "guest_type"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_privileged, "domain", "is_privileged"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_debugger_attached, "domain", ++ "debugger_attached"); ++ ++ /* ++ * Will be removed in Xen 4.4 (hg ae9b223a675d), ++ * need to check that with XEN_HYPER_VALID_MEMBER() before using ++ */ ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_polling, "domain", "is_polling"); ++ ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_dying, "domain", "is_dying"); ++ /* ++ * With Xen 4.2.5 is_paused_by_controller changed to ++ * controller_pause_count. 
++ */ ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_paused_by_controller, "domain", ++ "is_paused_by_controller"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_controller_pause_count, "domain", ++ "controller_pause_count"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shutting_down, "domain", ++ "is_shutting_down"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shut_down, "domain", "is_shut_down"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_vcpu, "domain", "vcpu"); ++ XEN_HYPER_MEMBER_SIZE_INIT(domain_vcpu, "domain", "vcpu"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_vcpus, "domain", "max_vcpus"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(domain_arch, "domain", "arch"); ++ ++ XEN_HYPER_STRUCT_SIZE_INIT(arch_shared_info, "arch_shared_info"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_max_pfn, "arch_shared_info", ++ "max_pfn"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_pfn_to_mfn_frame_list_list, ++ "arch_shared_info", ++ "pfn_to_mfn_frame_list_list"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_nmi_reason, "arch_shared_info", ++ "nmi_reason"); ++ ++ XEN_HYPER_STRUCT_SIZE_INIT(shared_info, "shared_info"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_vcpu_info, "shared_info", ++ "vcpu_info"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_pending, "shared_info", ++ "evtchn_pending"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_mask, "shared_info", ++ "evtchn_mask"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_arch, "shared_info", "arch"); + +- XEN_HYPER_STRUCT_SIZE_INIT(arch_domain, "arch_domain"); ++ XEN_HYPER_STRUCT_SIZE_INIT(arch_domain, "arch_domain"); + #ifdef IA64 +- XEN_HYPER_MEMBER_OFFSET_INIT(arch_domain_mm, "arch_domain", "mm"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(arch_domain_mm, "arch_domain", "mm"); + +- XEN_HYPER_STRUCT_SIZE_INIT(mm_struct, "mm_struct"); +- XEN_HYPER_MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); ++ XEN_HYPER_STRUCT_SIZE_INIT(mm_struct, "mm_struct"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); + #endif + +- if((xhdt->domain_struct = malloc(XEN_HYPER_SIZE(domain))) == NULL) { +- error(FATAL, "cannot malloc domain struct space.\n"); +- } +- if((xhdt->domain_struct_verify = malloc(XEN_HYPER_SIZE(domain))) == NULL) { +- error(FATAL, "cannot malloc domain struct space to verification.\n"); +- } +- xen_hyper_refresh_domain_context_space(); +- xhdt->flags |= XEN_HYPER_DOMAIN_F_INIT; ++ if ((xhdt->domain_struct = malloc(XEN_HYPER_SIZE(domain))) == NULL) { ++ error(FATAL, "cannot malloc domain struct space.\n"); ++ } ++ if ((xhdt->domain_struct_verify = malloc(XEN_HYPER_SIZE(domain))) == NULL) { ++ error(FATAL, "cannot malloc domain struct space to verification.\n"); ++ } ++ xen_hyper_refresh_domain_context_space(); ++ xhdt->flags |= XEN_HYPER_DOMAIN_F_INIT; + } + + /* + * Do initialization for vcpu of Xen Hyper system here. 
+ */ +-void +-xen_hyper_vcpu_init(void) +-{ +- XEN_HYPER_STRUCT_SIZE_INIT(timer, "timer"); +- XEN_HYPER_MEMBER_OFFSET_INIT(timer_expires, "timer", "expires"); +- XEN_HYPER_MEMBER_OFFSET_INIT(timer_cpu, "timer", "cpu"); +- XEN_HYPER_MEMBER_OFFSET_INIT(timer_function, "timer", "function"); +- XEN_HYPER_MEMBER_OFFSET_INIT(timer_data, "timer", "data"); +- XEN_HYPER_MEMBER_OFFSET_INIT(timer_heap_offset, "timer", "heap_offset"); +- XEN_HYPER_MEMBER_OFFSET_INIT(timer_killed, "timer", "killed"); +- +- XEN_HYPER_STRUCT_SIZE_INIT(vcpu_runstate_info, "vcpu_runstate_info"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state, "vcpu_runstate_info", "state"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state_entry_time, "vcpu_runstate_info", "state_entry_time"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_time, "vcpu_runstate_info", "time"); +- +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_id, "vcpu", "vcpu_id"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_processor, "vcpu", "processor"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_info, "vcpu", "vcpu_info"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_domain, "vcpu", "domain"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_next_in_list, "vcpu", "next_in_list"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_timer, "vcpu", "timer"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sleep_tick, "vcpu", "sleep_tick"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_poll_timer, "vcpu", "poll_timer"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sched_priv, "vcpu", "sched_priv"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate, "vcpu", "runstate"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_guest, "vcpu", "runstate_guest"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_flags, "vcpu", "vcpu_flags"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_pause_count, "vcpu", "pause_count"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_virq_to_evtchn, "vcpu", "virq_to_evtchn"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_cpu_affinity, "vcpu", "cpu_affinity"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_nmi_addr, "vcpu", "nmi_addr"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_dirty_cpumask, "vcpu", "vcpu_dirty_cpumask"); +- XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_arch, "vcpu", "arch"); ++void xen_hyper_vcpu_init(void) { ++ XEN_HYPER_STRUCT_SIZE_INIT(timer, "timer"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(timer_expires, "timer", "expires"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(timer_cpu, "timer", "cpu"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(timer_function, "timer", "function"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(timer_data, "timer", "data"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(timer_heap_offset, "timer", "heap_offset"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(timer_killed, "timer", "killed"); ++ ++ XEN_HYPER_STRUCT_SIZE_INIT(vcpu_runstate_info, "vcpu_runstate_info"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state, "vcpu_runstate_info", ++ "state"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state_entry_time, ++ "vcpu_runstate_info", "state_entry_time"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_time, "vcpu_runstate_info", ++ "time"); ++ ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_id, "vcpu", "vcpu_id"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_processor, "vcpu", "processor"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_info, "vcpu", "vcpu_info"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_domain, "vcpu", "domain"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_next_in_list, "vcpu", "next_in_list"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_timer, "vcpu", "timer"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sleep_tick, "vcpu", "sleep_tick"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_poll_timer, "vcpu", 
"poll_timer"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sched_priv, "vcpu", "sched_priv"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate, "vcpu", "runstate"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_guest, "vcpu", "runstate_guest"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_flags, "vcpu", "vcpu_flags"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_pause_count, "vcpu", "pause_count"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_virq_to_evtchn, "vcpu", "virq_to_evtchn"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_cpu_affinity, "vcpu", "cpu_affinity"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_nmi_addr, "vcpu", "nmi_addr"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_dirty_cpumask, "vcpu", ++ "vcpu_dirty_cpumask"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_arch, "vcpu", "arch"); + + #ifdef IA64 +- XEN_HYPER_ASSIGN_OFFSET(vcpu_thread_ksp) = +- MEMBER_OFFSET("vcpu", "arch") + MEMBER_OFFSET("arch_vcpu", "_thread") + +- MEMBER_OFFSET("thread_struct", "ksp"); ++ XEN_HYPER_ASSIGN_OFFSET(vcpu_thread_ksp) = ++ MEMBER_OFFSET("vcpu", "arch") + MEMBER_OFFSET("arch_vcpu", "_thread") + ++ MEMBER_OFFSET("thread_struct", "ksp"); + #endif + +- if((xhvct->vcpu_struct = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) { +- error(FATAL, "cannot malloc vcpu struct space.\n"); +- } +- if((xhvct->vcpu_struct_verify = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) { +- error(FATAL, "cannot malloc vcpu struct space to verification.\n"); +- } +- +- xen_hyper_refresh_vcpu_context_space(); +- xhvct->flags |= XEN_HYPER_VCPU_F_INIT; +- xhvct->idle_vcpu = symbol_value("idle_vcpu"); ++ if ((xhvct->vcpu_struct = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) { ++ error(FATAL, "cannot malloc vcpu struct space.\n"); ++ } ++ if ((xhvct->vcpu_struct_verify = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) { ++ error(FATAL, "cannot malloc vcpu struct space to verification.\n"); ++ } ++ ++ xen_hyper_refresh_vcpu_context_space(); ++ xhvct->flags |= XEN_HYPER_VCPU_F_INIT; ++ xhvct->idle_vcpu = symbol_value("idle_vcpu"); + } + + /* + * Do initialization for pcpu of Xen Hyper system here. 
+ */ + #if defined(X86) || defined(X86_64) +-void +-xen_hyper_x86_pcpu_init(void) +-{ +- ulong cpu_info; +- ulong init_tss_base, init_tss; +- ulong sp; +- struct xen_hyper_pcpu_context *pcc; +- char *buf, *bp; +- int i, cpuid; +- int flag; +- +- XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_guest_cpu_user_regs, "cpu_info", "guest_cpu_user_regs"); +- XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_processor_id, "cpu_info", "processor_id"); +- XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_current_vcpu, "cpu_info", "current_vcpu"); +- +- if((xhpct->pcpu_struct = malloc(XEN_HYPER_SIZE(cpu_info))) == NULL) { +- error(FATAL, "cannot malloc pcpu struct space.\n"); +- } +- /* get physical cpu context */ +- xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS()); +- if (symbol_exists("per_cpu__init_tss")) { +- init_tss_base = symbol_value("per_cpu__init_tss"); +- flag = TRUE; +- } else if (symbol_exists("per_cpu__tss_page")) { +- init_tss_base = symbol_value("per_cpu__tss_page"); +- flag = TRUE; +- } else { +- init_tss_base = symbol_value("init_tss"); +- flag = FALSE; +- } +- buf = GETBUF(XEN_HYPER_SIZE(tss)); +- for_cpu_indexes(i, cpuid) +- { +- if (flag) +- init_tss = xen_hyper_per_cpu(init_tss_base, cpuid); +- else +- init_tss = init_tss_base + +- XEN_HYPER_SIZE(tss) * cpuid; +- if (!readmem(init_tss, KVADDR, buf, +- XEN_HYPER_SIZE(tss), "init_tss", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read init_tss.\n"); +- } +- if (machine_type("X86")) { +- sp = ULONG(buf + XEN_HYPER_OFFSET(tss_esp0)); +- } else if (machine_type("X86_64")) { +- sp = ULONG(buf + XEN_HYPER_OFFSET(tss_rsp0)); +- } else +- sp = 0; +- cpu_info = XEN_HYPER_GET_CPU_INFO(sp); +- if (CRASHDEBUG(1)) { +- fprintf(fp, "sp=%lx, cpu_info=%lx\n", sp, cpu_info); +- } +- if(!(bp = xen_hyper_read_pcpu(cpu_info))) { +- error(FATAL, "cannot read cpu_info.\n"); +- } +- pcc = &xhpct->context_array[cpuid]; +- xen_hyper_store_pcpu_context(pcc, cpu_info, bp); +- xen_hyper_store_pcpu_context_tss(pcc, init_tss, buf); +- } +- FREEBUF(buf); ++void xen_hyper_x86_pcpu_init(void) { ++ ulong cpu_info; ++ ulong init_tss_base, init_tss = 0; ++ ulong sp; ++ struct xen_hyper_pcpu_context *pcc; ++ char *buf, *bp; ++ int i, cpuid; ++ int flag; ++ ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_guest_cpu_user_regs, "cpu_info", ++ "guest_cpu_user_regs"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_processor_id, "cpu_info", ++ "processor_id"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_current_vcpu, "cpu_info", ++ "current_vcpu"); ++ ++ if ((xhpct->pcpu_struct = malloc(XEN_HYPER_SIZE(cpu_info))) == NULL) { ++ error(FATAL, "cannot malloc pcpu struct space.\n"); ++ } ++ /* get physical cpu context */ ++ xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS()); ++ if (symbol_exists("per_cpu__init_tss")) { ++ init_tss_base = symbol_value("per_cpu__init_tss"); ++ flag = TRUE; ++ } else if (symbol_exists("per_cpu__tss_page")) { ++ init_tss_base = symbol_value("per_cpu__tss_page"); ++ flag = TRUE; ++ } else { ++ init_tss_base = symbol_value("init_tss"); ++ flag = FALSE; ++ } ++ buf = GETBUF(XEN_HYPER_SIZE(tss)); ++ for_cpu_indexes(i, cpuid) { ++ if (flag) ++ init_tss = xen_hyper_per_cpu(init_tss_base, cpuid); ++ else ++ init_tss = init_tss_base + XEN_HYPER_SIZE(tss) * cpuid; ++ if (!readmem(init_tss, KVADDR, buf, XEN_HYPER_SIZE(tss), "init_tss", ++ RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read init_tss.\n"); ++ } ++ if (machine_type("X86")) { ++ sp = ULONG(buf + XEN_HYPER_OFFSET(tss_esp0)); ++ } else if (machine_type("X86_64")) { ++ sp = ULONG(buf + XEN_HYPER_OFFSET(tss_rsp0)); ++ } else ++ sp = 0; ++ 
cpu_info = XEN_HYPER_GET_CPU_INFO(sp); ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "sp=%lx, cpu_info=%lx\n", sp, cpu_info); ++ } ++ if (!(bp = xen_hyper_read_pcpu(cpu_info))) { ++ error(FATAL, "cannot read cpu_info.\n"); ++ } ++ pcc = &xhpct->context_array[cpuid]; ++ xen_hyper_store_pcpu_context(pcc, cpu_info, bp); ++ xen_hyper_store_pcpu_context_tss(pcc, init_tss, buf); ++ } ++ FREEBUF(buf); + } + + #elif defined(IA64) +-void +-xen_hyper_ia64_pcpu_init(void) +-{ +- struct xen_hyper_pcpu_context *pcc; +- int i, cpuid; +- +- /* get physical cpu context */ +- xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS()); +- for_cpu_indexes(i, cpuid) +- { +- pcc = &xhpct->context_array[cpuid]; +- pcc->processor_id = cpuid; +- } ++void xen_hyper_ia64_pcpu_init(void) { ++ struct xen_hyper_pcpu_context *pcc; ++ int i, cpuid; ++ ++ /* get physical cpu context */ ++ xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS()); ++ for_cpu_indexes(i, cpuid) { ++ pcc = &xhpct->context_array[cpuid]; ++ pcc->processor_id = cpuid; ++ } + } + #endif + +@@ -403,35 +429,41 @@ xen_hyper_ia64_pcpu_init(void) + * Do initialization for some miscellaneous thing + * of Xen Hyper system here. + */ +-void +-xen_hyper_misc_init(void) +-{ +- XEN_HYPER_STRUCT_SIZE_INIT(schedule_data, "schedule_data"); +- XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock"); +- XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr"); +- if (MEMBER_EXISTS("schedule_data", "idle")) +- XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle"); +- XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv"); +- XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer"); +- XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick"); +- +- XEN_HYPER_STRUCT_SIZE_INIT(scheduler, "scheduler"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_name, "scheduler", "name"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_opt_name, "scheduler", "opt_name"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sched_id, "scheduler", "sched_id"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init, "scheduler", "init"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_tick, "scheduler", "tick"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init_vcpu, "scheduler", "init_vcpu"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_destroy_domain, "scheduler", "destroy_domain"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sleep, "scheduler", "sleep"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_wake, "scheduler", "wake"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_set_affinity, "scheduler", "set_affinity"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_do_schedule, "scheduler", "do_schedule"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_adjust, "scheduler", "adjust"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_settings, "scheduler", "dump_settings"); +- XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_cpu_state, "scheduler", "dump_cpu_state"); ++void xen_hyper_misc_init(void) { ++ XEN_HYPER_STRUCT_SIZE_INIT(schedule_data, "schedule_data"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", ++ "schedule_lock"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr"); ++ if (MEMBER_EXISTS("schedule_data", "idle")) ++ XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", ++ "sched_priv"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", 
++ "s_timer"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick"); ++ ++ XEN_HYPER_STRUCT_SIZE_INIT(scheduler, "scheduler"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_name, "scheduler", "name"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_opt_name, "scheduler", "opt_name"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sched_id, "scheduler", "sched_id"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init, "scheduler", "init"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_tick, "scheduler", "tick"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init_vcpu, "scheduler", "init_vcpu"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_destroy_domain, "scheduler", ++ "destroy_domain"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sleep, "scheduler", "sleep"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_wake, "scheduler", "wake"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_set_affinity, "scheduler", ++ "set_affinity"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_do_schedule, "scheduler", ++ "do_schedule"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_adjust, "scheduler", "adjust"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_settings, "scheduler", ++ "dump_settings"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_cpu_state, "scheduler", ++ "dump_cpu_state"); + +- xen_hyper_schedule_init(); ++ xen_hyper_schedule_init(); + } + + /* +@@ -439,482 +471,455 @@ xen_hyper_misc_init(void) + */ + #define XEN_HYPER_SCHEDULER_NAME 1024 + +-static int section_size(char *start_section, char *end_section) +-{ +- ulong sp_start, sp_end; +- +- sp_start = symbol_value(start_section); +- sp_end = symbol_value(end_section); +- +- return (sp_end - sp_start) / sizeof(long); +-} +- +-static void +-xen_hyper_schedule_init(void) +-{ +- ulong addr, opt_sched, schedulers, opt_name; +- long scheduler_opt_name; +- long *schedulers_buf; +- int nr_schedulers; +- struct xen_hyper_sched_context *schc; +- char *buf; +- char opt_name_buf[XEN_HYPER_OPT_SCHED_SIZE]; +- int i, cpuid, flag; +- char *sp_name; +- +- /* get scheduler information */ +- if((xhscht->scheduler_struct = +- malloc(XEN_HYPER_SIZE(scheduler))) == NULL) { +- error(FATAL, "cannot malloc scheduler struct space.\n"); +- } +- buf = GETBUF(XEN_HYPER_SCHEDULER_NAME); +- scheduler_opt_name = XEN_HYPER_OFFSET(scheduler_opt_name); +- if (symbol_exists("ops")) { +- if (!readmem(symbol_value("ops") + scheduler_opt_name, KVADDR, +- &opt_sched, sizeof(ulong), "ops.opt_name", +- RETURN_ON_ERROR)) { +- error(FATAL, "cannot read ops.opt_name.\n"); +- } +- } else { +- opt_sched = symbol_value("opt_sched"); +- } +- if (!readmem(opt_sched, KVADDR, xhscht->opt_sched, +- XEN_HYPER_OPT_SCHED_SIZE, "opt_sched,", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read opt_sched,.\n"); +- } +- +- /* symbol exists since Xen 4.7 */ +- if (symbol_exists("__start_schedulers_array")) { +- sp_name = "__start_schedulers_array"; +- nr_schedulers = section_size("__start_schedulers_array", +- "__end_schedulers_array"); +- } else { +- sp_name = "schedulers"; +- nr_schedulers = get_array_length("schedulers", 0, 0); +- } +- +- schedulers_buf = (long *)GETBUF(nr_schedulers * sizeof(long)); +- schedulers = symbol_value(sp_name); +- addr = schedulers; +- while (xhscht->name == NULL) { +- if (!readmem(addr, KVADDR, schedulers_buf, +- sizeof(long) * nr_schedulers, +- "schedulers", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read schedulers.\n"); +- } +- for (i = 0; i < nr_schedulers; i++) { +- if (schedulers_buf[i] == 0) { +- error(FATAL, "schedule data not found.\n"); +- } +- if (!readmem(schedulers_buf[i], KVADDR, 
+- xhscht->scheduler_struct, XEN_HYPER_SIZE(scheduler), +- "scheduler", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read scheduler.\n"); +- } +- opt_name = ULONG(xhscht->scheduler_struct + +- scheduler_opt_name); +- if (!readmem(opt_name, KVADDR, opt_name_buf, +- XEN_HYPER_OPT_SCHED_SIZE, "opt_name", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read opt_name.\n"); +- } +- if (strncmp(xhscht->opt_sched, opt_name_buf, +- XEN_HYPER_OPT_SCHED_SIZE)) +- continue; +- xhscht->scheduler = schedulers_buf[i]; +- xhscht->sched_id = INT(xhscht->scheduler_struct + +- XEN_HYPER_OFFSET(scheduler_sched_id)); +- addr = ULONG(xhscht->scheduler_struct + +- XEN_HYPER_OFFSET(scheduler_name)); +- if (!readmem(addr, KVADDR, buf, XEN_HYPER_SCHEDULER_NAME, +- "scheduler_name", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read scheduler_name.\n"); +- } +- if (strlen(buf) >= XEN_HYPER_SCHEDULER_NAME) { +- error(FATAL, "cannot read scheduler_name.\n"); +- } +- if((xhscht->name = malloc(strlen(buf) + 1)) == NULL) { +- error(FATAL, "cannot malloc scheduler_name space.\n"); +- } +- BZERO(xhscht->name, strlen(buf) + 1); +- BCOPY(buf, xhscht->name, strlen(buf)); +- break; +- } +- addr += sizeof(long) * nr_schedulers; +- } +- FREEBUF(buf); +- FREEBUF(schedulers_buf); +- +- /* get schedule_data information */ +- if((xhscht->sched_context_array = +- malloc(sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS())) == NULL) { +- error(FATAL, "cannot malloc xen_hyper_sched_context struct space.\n"); +- } +- BZERO(xhscht->sched_context_array, +- sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS()); +- buf = GETBUF(XEN_HYPER_SIZE(schedule_data)); +- if (symbol_exists("per_cpu__schedule_data")) { +- addr = symbol_value("per_cpu__schedule_data"); +- flag = TRUE; +- } else { +- addr = symbol_value("schedule_data"); +- flag = FALSE; +- } +- for_cpu_indexes(i, cpuid) +- { +- schc = &xhscht->sched_context_array[cpuid]; +- if (flag) { +- schc->schedule_data = +- xen_hyper_per_cpu(addr, i); +- } else { +- schc->schedule_data = addr + +- XEN_HYPER_SIZE(schedule_data) * i; +- } +- if (!readmem(schc->schedule_data, +- KVADDR, buf, XEN_HYPER_SIZE(schedule_data), +- "schedule_data", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read schedule_data.\n"); +- } +- schc->cpu_id = cpuid; +- schc->curr = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_curr)); +- if (MEMBER_EXISTS("schedule_data", "idle")) +- schc->idle = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_idle)); +- else +- schc->idle = xht->idle_vcpu_array[cpuid]; +- schc->sched_priv = +- ULONG(buf + XEN_HYPER_OFFSET(schedule_data_sched_priv)); +- if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) +- schc->tick = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_tick)); +- } +- FREEBUF(buf); ++static int section_size(char *start_section, char *end_section) { ++ ulong sp_start, sp_end; ++ ++ sp_start = symbol_value(start_section); ++ sp_end = symbol_value(end_section); ++ ++ return (sp_end - sp_start) / sizeof(long); ++} ++ ++static void xen_hyper_schedule_init(void) { ++ ulong addr, opt_sched, schedulers, opt_name; ++ long scheduler_opt_name; ++ long *schedulers_buf; ++ int nr_schedulers; ++ struct xen_hyper_sched_context *schc; ++ char *buf; ++ char opt_name_buf[XEN_HYPER_OPT_SCHED_SIZE]; ++ int i, cpuid, flag; ++ char *sp_name; ++ ++ /* get scheduler information */ ++ if ((xhscht->scheduler_struct = malloc(XEN_HYPER_SIZE(scheduler))) == NULL) { ++ error(FATAL, "cannot malloc scheduler struct space.\n"); ++ } ++ buf = GETBUF(XEN_HYPER_SCHEDULER_NAME); ++ scheduler_opt_name = 
XEN_HYPER_OFFSET(scheduler_opt_name); ++ if (symbol_exists("ops")) { ++ if (!readmem(symbol_value("ops") + scheduler_opt_name, KVADDR, &opt_sched, ++ sizeof(ulong), "ops.opt_name", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read ops.opt_name.\n"); ++ } ++ } else { ++ opt_sched = symbol_value("opt_sched"); ++ } ++ if (!readmem(opt_sched, KVADDR, xhscht->opt_sched, XEN_HYPER_OPT_SCHED_SIZE, ++ "opt_sched,", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read opt_sched,.\n"); ++ } ++ ++ /* symbol exists since Xen 4.7 */ ++ if (symbol_exists("__start_schedulers_array")) { ++ sp_name = "__start_schedulers_array"; ++ nr_schedulers = ++ section_size("__start_schedulers_array", "__end_schedulers_array"); ++ } else { ++ sp_name = "schedulers"; ++ nr_schedulers = get_array_length("schedulers", 0, 0); ++ } ++ ++ schedulers_buf = (long *)GETBUF(nr_schedulers * sizeof(long)); ++ schedulers = symbol_value(sp_name); ++ addr = schedulers; ++ while (xhscht->name == NULL) { ++ if (!readmem(addr, KVADDR, schedulers_buf, sizeof(long) * nr_schedulers, ++ "schedulers", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read schedulers.\n"); ++ } ++ for (i = 0; i < nr_schedulers; i++) { ++ if (schedulers_buf[i] == 0) { ++ error(FATAL, "schedule data not found.\n"); ++ } ++ if (!readmem(schedulers_buf[i], KVADDR, xhscht->scheduler_struct, ++ XEN_HYPER_SIZE(scheduler), "scheduler", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read scheduler.\n"); ++ } ++ opt_name = ULONG(xhscht->scheduler_struct + scheduler_opt_name); ++ if (!readmem(opt_name, KVADDR, opt_name_buf, XEN_HYPER_OPT_SCHED_SIZE, ++ "opt_name", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read opt_name.\n"); ++ } ++ if (strncmp(xhscht->opt_sched, opt_name_buf, XEN_HYPER_OPT_SCHED_SIZE)) ++ continue; ++ xhscht->scheduler = schedulers_buf[i]; ++ xhscht->sched_id = ++ INT(xhscht->scheduler_struct + XEN_HYPER_OFFSET(scheduler_sched_id)); ++ addr = ULONG(xhscht->scheduler_struct + XEN_HYPER_OFFSET(scheduler_name)); ++ if (!readmem(addr, KVADDR, buf, XEN_HYPER_SCHEDULER_NAME, ++ "scheduler_name", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read scheduler_name.\n"); ++ } ++ if (strlen(buf) >= XEN_HYPER_SCHEDULER_NAME) { ++ error(FATAL, "cannot read scheduler_name.\n"); ++ } ++ if ((xhscht->name = malloc(strlen(buf) + 1)) == NULL) { ++ error(FATAL, "cannot malloc scheduler_name space.\n"); ++ } ++ BZERO(xhscht->name, strlen(buf) + 1); ++ BCOPY(buf, xhscht->name, strlen(buf)); ++ break; ++ } ++ addr += sizeof(long) * nr_schedulers; ++ } ++ FREEBUF(buf); ++ FREEBUF(schedulers_buf); ++ ++ /* get schedule_data information */ ++ if ((xhscht->sched_context_array = malloc( ++ sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS())) == ++ NULL) { ++ error(FATAL, "cannot malloc xen_hyper_sched_context struct space.\n"); ++ } ++ BZERO(xhscht->sched_context_array, ++ sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS()); ++ buf = GETBUF(XEN_HYPER_SIZE(schedule_data)); ++ if (symbol_exists("per_cpu__schedule_data")) { ++ addr = symbol_value("per_cpu__schedule_data"); ++ flag = TRUE; ++ } else { ++ addr = symbol_value("schedule_data"); ++ flag = FALSE; ++ } ++ for_cpu_indexes(i, cpuid) { ++ schc = &xhscht->sched_context_array[cpuid]; ++ if (flag) { ++ schc->schedule_data = xen_hyper_per_cpu(addr, i); ++ } else { ++ schc->schedule_data = addr + XEN_HYPER_SIZE(schedule_data) * i; ++ } ++ if (!readmem(schc->schedule_data, KVADDR, buf, ++ XEN_HYPER_SIZE(schedule_data), "schedule_data", ++ RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read schedule_data.\n"); ++ } ++ 
schc->cpu_id = cpuid; ++ schc->curr = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_curr)); ++ if (MEMBER_EXISTS("schedule_data", "idle")) ++ schc->idle = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_idle)); ++ else ++ schc->idle = xht->idle_vcpu_array[cpuid]; ++ schc->sched_priv = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_sched_priv)); ++ if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) ++ schc->tick = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_tick)); ++ } ++ FREEBUF(buf); + } + + /* + * This should be called after all initailize process finished. + */ +-void +-xen_hyper_post_init(void) +-{ +- struct xen_hyper_pcpu_context *pcc; +- int i, cpuid; +- +- /* set current vcpu to pcpu context */ +- for_cpu_indexes(i, cpuid) +- { +- pcc = &xhpct->context_array[cpuid]; +- if (!pcc->current_vcpu) { +- pcc->current_vcpu = +- xen_hyper_get_active_vcpu_from_pcpuid(cpuid); +- } +- } +- +- /* set pcpu last */ +- if (!(xhpct->last = +- xen_hyper_id_to_pcpu_context(XEN_HYPER_CRASHING_CPU()))) { +- xhpct->last = &xhpct->context_array[xht->cpu_idxs[0]]; +- } +- +- /* set vcpu last */ +- if (xhpct->last) { +- xhvct->last = +- xen_hyper_vcpu_to_vcpu_context(xhpct->last->current_vcpu); +- /* set crashing vcpu */ +- xht->crashing_vcc = xhvct->last; +- } +- if (!xhvct->last) { +- xhvct->last = xhvct->vcpu_context_arrays->context_array; +- } +- +- /* set domain last */ +- if (xhvct->last) { +- xhdt->last = +- xen_hyper_domain_to_domain_context(xhvct->last->domain); +- } +- if (!xhdt->last) { +- xhdt->last = xhdt->context_array; +- } ++void xen_hyper_post_init(void) { ++ struct xen_hyper_pcpu_context *pcc; ++ int i, cpuid; ++ ++ /* set current vcpu to pcpu context */ ++ for_cpu_indexes(i, cpuid) { ++ pcc = &xhpct->context_array[cpuid]; ++ if (!pcc->current_vcpu) { ++ pcc->current_vcpu = xen_hyper_get_active_vcpu_from_pcpuid(cpuid); ++ } ++ } ++ ++ /* set pcpu last */ ++ if (!(xhpct->last = xen_hyper_id_to_pcpu_context(XEN_HYPER_CRASHING_CPU()))) { ++ xhpct->last = &xhpct->context_array[xht->cpu_idxs[0]]; ++ } ++ ++ /* set vcpu last */ ++ if (xhpct->last) { ++ xhvct->last = xen_hyper_vcpu_to_vcpu_context(xhpct->last->current_vcpu); ++ /* set crashing vcpu */ ++ xht->crashing_vcc = xhvct->last; ++ } ++ if (!xhvct->last) { ++ xhvct->last = xhvct->vcpu_context_arrays->context_array; ++ } ++ ++ /* set domain last */ ++ if (xhvct->last) { ++ xhdt->last = xen_hyper_domain_to_domain_context(xhvct->last->domain); ++ } ++ if (!xhdt->last) { ++ xhdt->last = xhdt->context_array; ++ } + } + + /* + * Do initialization for dump information here. + */ +-void +-xen_hyper_dumpinfo_init(void) +-{ +- Elf32_Nhdr *note; +- char *buf, *bp, *np, *upp; +- char *nccp, *xccp; +- ulong addr; +- long size; +- int i, cpuid, samp_cpuid; +- +- /* +- * NOTE kakuma: It is not clear that what kind of +- * a elf note format each one of the xen uses. +- * So, we decide it confirming whether a symbol exists. 
+- */ +- if (STRUCT_EXISTS("note_buf_t")) +- xhdit->note_ver = XEN_HYPER_ELF_NOTE_V1; +- else if (STRUCT_EXISTS("crash_note_xen_t")) +- xhdit->note_ver = XEN_HYPER_ELF_NOTE_V2; +- else if (STRUCT_EXISTS("crash_xen_core_t")) { +- if (STRUCT_EXISTS("crash_note_xen_core_t")) +- xhdit->note_ver = XEN_HYPER_ELF_NOTE_V3; +- else +- xhdit->note_ver = XEN_HYPER_ELF_NOTE_V4; +- } else { +- error(WARNING, "found unsupported elf note format while checking of xen dumpinfo.\n"); +- return; +- } +- if (!xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) { +- error(WARNING, "crashing_cpu not found.\n"); +- return; +- } +- +- /* allocate a context area */ +- size = sizeof(struct xen_hyper_dumpinfo_context) * machdep->get_smp_cpus(); +- if((xhdit->context_array = malloc(size)) == NULL) { +- error(FATAL, "cannot malloc dumpinfo table context space.\n"); +- } +- BZERO(xhdit->context_array, size); +- size = sizeof(struct xen_hyper_dumpinfo_context_xen_core) * machdep->get_smp_cpus(); +- if((xhdit->context_xen_core_array = malloc(size)) == NULL) { +- error(FATAL, "cannot malloc dumpinfo table context_xen_core_array space.\n"); +- } +- BZERO(xhdit->context_xen_core_array, size); +- if (symbol_exists("per_cpu__crash_notes")) +- addr = symbol_value("per_cpu__crash_notes"); +- else +- get_symbol_data("crash_notes", sizeof(ulong), &addr); +- for (i = 0; i < machdep->get_smp_cpus(); i++) { +- ulong addr_notes; +- +- if (symbol_exists("per_cpu__crash_notes")) +- addr_notes = xen_hyper_per_cpu(addr, i); +- else +- addr_notes = addr + i * STRUCT_SIZE("crash_note_range_t") + +- MEMBER_OFFSET("crash_note_range_t", "start"); +- if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) { +- if (!readmem(addr_notes, KVADDR, &(xhdit->context_array[i].note), +- sizeof(ulong), "crash_notes", RETURN_ON_ERROR)) { +- error(WARNING, "cannot read crash_notes.\n"); +- return; +- } +- } else { +- xhdit->context_array[i].note = addr_notes; +- } +- } +- +- if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V1) { +- xhdit->note_size = XEN_HYPER_SIZE(note_buf_t); +- } else if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) { +- xhdit->note_size = XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE; +- } else { +- xhdit->note_size = XEN_HYPER_SIZE(crash_note_t); +- } +- +- /* read a sample note */ +- buf = GETBUF(xhdit->note_size); +- if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) +- samp_cpuid = xht->cpu_idxs[0]; +- else +- samp_cpuid = XEN_HYPER_CRASHING_CPU(); +- xhdit->xen_info_cpu = samp_cpuid; +- if (!xen_hyper_fill_elf_notes(xhdit->context_array[samp_cpuid].note, +- buf, XEN_HYPER_ELF_NOTE_FILL_T_NOTE)) { +- error(FATAL, "cannot read crash_notes.\n"); +- } +- bp = buf; +- +- /* Get elf format information for each version. 
*/ +- switch (xhdit->note_ver) { +- case XEN_HYPER_ELF_NOTE_V1: +- /* core data */ +- note = (Elf32_Nhdr *)bp; +- np = bp + sizeof(Elf32_Nhdr); +- upp = np + note->n_namesz; +- upp = (char *)roundup((ulong)upp, 4); +- xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note); +- note = (Elf32_Nhdr *)(upp + note->n_descsz); +- /* cr3 data */ +- np = (char *)note + sizeof(Elf32_Nhdr); +- upp = np + note->n_namesz; +- upp = (char *)roundup((ulong)upp, 4); +- upp = upp + note->n_descsz; +- xhdit->core_size = upp - bp; +- break; +- case XEN_HYPER_ELF_NOTE_V2: +- /* core data */ +- xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc); +- xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t); +- /* xen core */ +- xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_t_desc); +- xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_t); +- break; +- case XEN_HYPER_ELF_NOTE_V3: +- /* core data */ +- xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc); +- xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t); +- /* xen core */ +- xhdit->xen_core_offset = XEN_HYPER_OFFSET(crash_note_xen_core_t_desc); +- xhdit->xen_core_size = XEN_HYPER_SIZE(crash_note_xen_core_t); +- /* xen info */ +- xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_info_t_desc); +- xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_info_t); +- break; +- case XEN_HYPER_ELF_NOTE_V4: +- /* core data */ +- note = (Elf32_Nhdr *)bp; +- np = bp + sizeof(Elf32_Nhdr); +- upp = np + note->n_namesz; +- upp = (char *)roundup((ulong)upp, 4); +- xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note); +- upp = upp + note->n_descsz; +- xhdit->core_size = (Elf_Word)((ulong)upp - (ulong)note); +- if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < xhdit->core_size + 32) { +- error(WARNING, "note size is assumed on crash is incorrect.(core data)\n"); +- return; +- } +- /* xen core */ +- note = (Elf32_Nhdr *)upp; +- np = (char *)note + sizeof(Elf32_Nhdr); +- upp = np + note->n_namesz; +- upp = (char *)roundup((ulong)upp, 4); +- xhdit->xen_core_offset = (Elf_Word)((ulong)upp - (ulong)note); +- upp = upp + note->n_descsz; +- xhdit->xen_core_size = (Elf_Word)((ulong)upp - (ulong)note); +- if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < +- xhdit->core_size + xhdit->xen_core_size + 32) { +- error(WARNING, "note size is assumed on crash is incorrect.(xen core)\n"); +- return; +- } +- /* xen info */ +- note = (Elf32_Nhdr *)upp; +- np = (char *)note + sizeof(Elf32_Nhdr); +- upp = np + note->n_namesz; +- upp = (char *)roundup((ulong)upp, 4); +- xhdit->xen_info_offset = (Elf_Word)((ulong)upp - (ulong)note); +- upp = upp + note->n_descsz; +- xhdit->xen_info_size = (Elf_Word)((ulong)upp - (ulong)note); +- if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < +- xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size) { +- error(WARNING, "note size is assumed on crash is incorrect.(xen info)\n"); +- return; +- } +- xhdit->note_size = xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size; +- break; +- default: +- error(FATAL, "logic error in cheking elf note format occurs.\n"); +- } +- +- /* fill xen info context. 
*/
+-	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) {
+-		if((xhdit->crash_note_xen_info_ptr =
+-			malloc(xhdit->xen_info_size)) == NULL) {
+-			error(FATAL, "cannot malloc dumpinfo table "
+-				"crash_note_xen_info_ptr space.\n");
+-		}
+-		memcpy(xhdit->crash_note_xen_info_ptr,
+-			bp + xhdit->core_size + xhdit->xen_core_size,
+-			xhdit->xen_info_size);
+-		xhdit->context_xen_info.note =
+-			xhdit->context_array[samp_cpuid].note +
+-			xhdit->core_size + xhdit->xen_core_size;
+-		xhdit->context_xen_info.pcpu_id = samp_cpuid;
+-		xhdit->context_xen_info.crash_xen_info_ptr =
+-			xhdit->crash_note_xen_info_ptr + xhdit->xen_info_offset;
+-	}
+-
+-	/* allocate note core */
+-	size = xhdit->core_size * XEN_HYPER_NR_PCPUS();
+-	if(!(xhdit->crash_note_core_array = malloc(size))) {
+-		error(FATAL, "cannot malloc crash_note_core_array space.\n");
+-	}
+-	nccp = xhdit->crash_note_core_array;
+-	BZERO(nccp, size);
+-	xccp = NULL;
+-
+-	/* allocate xen core */
+-	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) {
+-		size = xhdit->xen_core_size * XEN_HYPER_NR_PCPUS();
+-		if(!(xhdit->crash_note_xen_core_array = malloc(size))) {
+-			error(FATAL, "cannot malloc dumpinfo table "
+-				"crash_note_xen_core_array space.\n");
+-		}
+-		xccp = xhdit->crash_note_xen_core_array;
+-		BZERO(xccp, size);
+-	}
+-
+-	/* fill a context. */
+-	for_cpu_indexes(i, cpuid)
+-	{
+-		/* fill core context. */
+-		addr = xhdit->context_array[cpuid].note;
+-		if (!xen_hyper_fill_elf_notes(addr, nccp,
+-		XEN_HYPER_ELF_NOTE_FILL_T_CORE)) {
+-			error(FATAL, "cannot read elf note core.\n");
+-		}
+-		xhdit->context_array[cpuid].pcpu_id = cpuid;
+-		xhdit->context_array[cpuid].ELF_Prstatus_ptr =
+-			nccp + xhdit->core_offset;
+-		xhdit->context_array[cpuid].pr_reg_ptr =
+-			nccp + xhdit->core_offset +
+-			XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg);
+-
+-		/* Is there xen core data? */
+-		if (xhdit->note_ver < XEN_HYPER_ELF_NOTE_V2) {
+-			nccp += xhdit->core_size;
+-			continue;
+-		}
+-		if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V2 &&
+-		cpuid != samp_cpuid) {
+-			xccp += xhdit->xen_core_size;
+-			nccp += xhdit->core_size;
+-			continue;
+-		}
+-
+-		/* fill xen core context, in case of more elf note V2. */
+-		xhdit->context_xen_core_array[cpuid].note =
+-			xhdit->context_array[cpuid].note +
+-			xhdit->core_size;
+-		xhdit->context_xen_core_array[cpuid].pcpu_id = cpuid;
+-		xhdit->context_xen_core_array[cpuid].crash_xen_core_ptr =
+-			xccp + xhdit->xen_core_offset;
+-		if (!xen_hyper_fill_elf_notes(xhdit->context_xen_core_array[cpuid].note,
+-		xccp, XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)) {
+-			error(FATAL, "cannot read elf note xen core.\n");
+-		}
+-		xccp += xhdit->xen_core_size;
+-		nccp += xhdit->core_size;
+-	}
++void xen_hyper_dumpinfo_init(void) {
++  Elf32_Nhdr *note;
++  char *buf, *bp, *np, *upp;
++  char *nccp, *xccp;
++  ulong addr;
++  long size;
++  int i, cpuid, samp_cpuid;
++
++  /*
++   * NOTE kakuma: It is not clear what kind of ELF note format each
++   * version of Xen uses, so we decide by checking whether a symbol
++   * exists.
++   */
++  if (STRUCT_EXISTS("note_buf_t"))
++    xhdit->note_ver = XEN_HYPER_ELF_NOTE_V1;
++  else if (STRUCT_EXISTS("crash_note_xen_t"))
++    xhdit->note_ver = XEN_HYPER_ELF_NOTE_V2;
++  else if (STRUCT_EXISTS("crash_xen_core_t")) {
++    if (STRUCT_EXISTS("crash_note_xen_core_t"))
++      xhdit->note_ver = XEN_HYPER_ELF_NOTE_V3;
++    else
++      xhdit->note_ver = XEN_HYPER_ELF_NOTE_V4;
++  } else {
++    error(
++        WARNING,
++        "found unsupported elf note format while checking xen dumpinfo.\n");
++    return;
++  }
++  if (!xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
++    error(WARNING, "crashing_cpu not found.\n");
++    return;
++  }
++
++  /* allocate a context area */
++  size = sizeof(struct xen_hyper_dumpinfo_context) * machdep->get_smp_cpus();
++  if ((xhdit->context_array = malloc(size)) == NULL) {
++    error(FATAL, "cannot malloc dumpinfo table context space.\n");
++  }
++  BZERO(xhdit->context_array, size);
++  size = sizeof(struct xen_hyper_dumpinfo_context_xen_core) *
++         machdep->get_smp_cpus();
++  if ((xhdit->context_xen_core_array = malloc(size)) == NULL) {
++    error(FATAL,
++          "cannot malloc dumpinfo table context_xen_core_array space.\n");
++  }
++  BZERO(xhdit->context_xen_core_array, size);
++  if (symbol_exists("per_cpu__crash_notes"))
++    addr = symbol_value("per_cpu__crash_notes");
++  else
++    get_symbol_data("crash_notes", sizeof(ulong), &addr);
++  for (i = 0; i < machdep->get_smp_cpus(); i++) {
++    ulong addr_notes;
++
++    if (symbol_exists("per_cpu__crash_notes"))
++      addr_notes = xen_hyper_per_cpu(addr, i);
++    else
++      addr_notes = addr + i * STRUCT_SIZE("crash_note_range_t") +
++                   MEMBER_OFFSET("crash_note_range_t", "start");
++    if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) {
++      if (!readmem(addr_notes, KVADDR, &(xhdit->context_array[i].note),
++                   sizeof(ulong), "crash_notes", RETURN_ON_ERROR)) {
++        error(WARNING, "cannot read crash_notes.\n");
++        return;
++      }
++    } else {
++      xhdit->context_array[i].note = addr_notes;
++    }
++  }
++
++  if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V1) {
++    xhdit->note_size = XEN_HYPER_SIZE(note_buf_t);
++  } else if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) {
++    xhdit->note_size = XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE;
++  } else {
++    xhdit->note_size = XEN_HYPER_SIZE(crash_note_t);
++  }
++
++  /* read a sample note */
++  buf = GETBUF(xhdit->note_size);
++  if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4)
++    samp_cpuid = xht->cpu_idxs[0];
++  else
++    samp_cpuid = XEN_HYPER_CRASHING_CPU();
++  xhdit->xen_info_cpu = samp_cpuid;
++  if (!xen_hyper_fill_elf_notes(xhdit->context_array[samp_cpuid].note, buf,
++                                XEN_HYPER_ELF_NOTE_FILL_T_NOTE)) {
++    error(FATAL, "cannot read crash_notes.\n");
++  }
++  bp = buf;
++
++  /* Get elf format information for each version. */
++  switch (xhdit->note_ver) {
++  case XEN_HYPER_ELF_NOTE_V1:
++    /* core data */
++    note = (Elf32_Nhdr *)bp;
++    np = bp + sizeof(Elf32_Nhdr);
++    upp = np + note->n_namesz;
++    upp = (char *)roundup((ulong)upp, 4);
++    xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note);
++    note = (Elf32_Nhdr *)(upp + note->n_descsz);
++    /* cr3 data */
++    np = (char *)note + sizeof(Elf32_Nhdr);
++    upp = np + note->n_namesz;
++    upp = (char *)roundup((ulong)upp, 4);
++    upp = upp + note->n_descsz;
++    xhdit->core_size = upp - bp;
++    break;
++  case XEN_HYPER_ELF_NOTE_V2:
++    /* core data */
++    xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc);
++    xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t);
++    /* xen core */
++    xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_t_desc);
++    xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_t);
++    break;
++  case XEN_HYPER_ELF_NOTE_V3:
++    /* core data */
++    xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc);
++    xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t);
++    /* xen core */
++    xhdit->xen_core_offset = XEN_HYPER_OFFSET(crash_note_xen_core_t_desc);
++    xhdit->xen_core_size = XEN_HYPER_SIZE(crash_note_xen_core_t);
++    /* xen info */
++    xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_info_t_desc);
++    xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_info_t);
++    break;
++  case XEN_HYPER_ELF_NOTE_V4:
++    /* core data */
++    note = (Elf32_Nhdr *)bp;
++    np = bp + sizeof(Elf32_Nhdr);
++    upp = np + note->n_namesz;
++    upp = (char *)roundup((ulong)upp, 4);
++    xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note);
++    upp = upp + note->n_descsz;
++    xhdit->core_size = (Elf_Word)((ulong)upp - (ulong)note);
++    if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < xhdit->core_size + 32) {
++      error(WARNING,
++            "note size assumed by crash is incorrect (core data).\n");
++      return;
++    }
++    /* xen core */
++    note = (Elf32_Nhdr *)upp;
++    np = (char *)note + sizeof(Elf32_Nhdr);
++    upp = np + note->n_namesz;
++    upp = (char *)roundup((ulong)upp, 4);
++    xhdit->xen_core_offset = (Elf_Word)((ulong)upp - (ulong)note);
++    upp = upp + note->n_descsz;
++    xhdit->xen_core_size = (Elf_Word)((ulong)upp - (ulong)note);
++    if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE <
++        xhdit->core_size + xhdit->xen_core_size + 32) {
++      error(WARNING, "note size assumed by crash is incorrect (xen core).\n");
++      return;
++    }
++    /* xen info */
++    note = (Elf32_Nhdr *)upp;
++    np = (char *)note + sizeof(Elf32_Nhdr);
++    upp = np + note->n_namesz;
++    upp = (char *)roundup((ulong)upp, 4);
++    xhdit->xen_info_offset = (Elf_Word)((ulong)upp - (ulong)note);
++    upp = upp + note->n_descsz;
++    xhdit->xen_info_size = (Elf_Word)((ulong)upp - (ulong)note);
++    if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE <
++        xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size) {
++      error(WARNING, "note size assumed by crash is incorrect (xen info).\n");
++      return;
++    }
++    xhdit->note_size =
++        xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size;
++    break;
++  default:
++    error(FATAL, "logic error while checking elf note format.\n");
++  }
++
++  /* fill xen info context. 
*/ ++ if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) { ++ if ((xhdit->crash_note_xen_info_ptr = malloc(xhdit->xen_info_size)) == ++ NULL) { ++ error(FATAL, "cannot malloc dumpinfo table " ++ "crash_note_xen_info_ptr space.\n"); ++ } ++ memcpy(xhdit->crash_note_xen_info_ptr, ++ bp + xhdit->core_size + xhdit->xen_core_size, xhdit->xen_info_size); ++ xhdit->context_xen_info.note = xhdit->context_array[samp_cpuid].note + ++ xhdit->core_size + xhdit->xen_core_size; ++ xhdit->context_xen_info.pcpu_id = samp_cpuid; ++ xhdit->context_xen_info.crash_xen_info_ptr = ++ xhdit->crash_note_xen_info_ptr + xhdit->xen_info_offset; ++ } ++ ++ /* allocate note core */ ++ size = xhdit->core_size * XEN_HYPER_NR_PCPUS(); ++ if (!(xhdit->crash_note_core_array = malloc(size))) { ++ error(FATAL, "cannot malloc crash_note_core_array space.\n"); ++ } ++ nccp = xhdit->crash_note_core_array; ++ BZERO(nccp, size); ++ xccp = NULL; ++ ++ /* allocate xen core */ ++ if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) { ++ size = xhdit->xen_core_size * XEN_HYPER_NR_PCPUS(); ++ if (!(xhdit->crash_note_xen_core_array = malloc(size))) { ++ error(FATAL, "cannot malloc dumpinfo table " ++ "crash_note_xen_core_array space.\n"); ++ } ++ xccp = xhdit->crash_note_xen_core_array; ++ BZERO(xccp, size); ++ } ++ ++ /* fill a context. */ ++ for_cpu_indexes(i, cpuid) { ++ /* fill core context. */ ++ addr = xhdit->context_array[cpuid].note; ++ if (!xen_hyper_fill_elf_notes(addr, nccp, XEN_HYPER_ELF_NOTE_FILL_T_CORE)) { ++ error(FATAL, "cannot read elf note core.\n"); ++ } ++ xhdit->context_array[cpuid].pcpu_id = cpuid; ++ xhdit->context_array[cpuid].ELF_Prstatus_ptr = nccp + xhdit->core_offset; ++ xhdit->context_array[cpuid].pr_reg_ptr = ++ nccp + xhdit->core_offset + XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg); ++ ++ /* Is there xen core data? */ ++ if (xhdit->note_ver < XEN_HYPER_ELF_NOTE_V2) { ++ nccp += xhdit->core_size; ++ continue; ++ } ++ if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V2 && cpuid != samp_cpuid) { ++ xccp += xhdit->xen_core_size; ++ nccp += xhdit->core_size; ++ continue; ++ } ++ ++ /* fill xen core context, in case of more elf note V2. */ ++ xhdit->context_xen_core_array[cpuid].note = ++ xhdit->context_array[cpuid].note + xhdit->core_size; ++ xhdit->context_xen_core_array[cpuid].pcpu_id = cpuid; ++ xhdit->context_xen_core_array[cpuid].crash_xen_core_ptr = ++ xccp + xhdit->xen_core_offset; ++ if (!xen_hyper_fill_elf_notes(xhdit->context_xen_core_array[cpuid].note, ++ xccp, XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)) { ++ error(FATAL, "cannot read elf note xen core.\n"); ++ } ++ xccp += xhdit->xen_core_size; ++ nccp += xhdit->core_size; ++ } + +- FREEBUF(buf); ++ FREEBUF(buf); + } + + /* + * Get dump information context from physical cpu id. + */ +-struct xen_hyper_dumpinfo_context * +-xen_hyper_id_to_dumpinfo_context(uint id) +-{ +- if (!xen_hyper_test_pcpu_id(id)) +- return NULL; +- return &xhdit->context_array[id]; ++struct xen_hyper_dumpinfo_context *xen_hyper_id_to_dumpinfo_context(uint id) { ++ if (!xen_hyper_test_pcpu_id(id)) ++ return NULL; ++ return &xhdit->context_array[id]; + } + + /* + * Get dump information context from ELF Note address. 
+ */ + struct xen_hyper_dumpinfo_context * +-xen_hyper_note_to_dumpinfo_context(ulong note) +-{ +- int i; +- +- for (i = 0; i < XEN_HYPER_MAX_CPUS(); i++) { +- if (note == xhdit->context_array[i].note) { +- return &xhdit->context_array[i]; +- } +- } +- return NULL; ++xen_hyper_note_to_dumpinfo_context(ulong note) { ++ int i; ++ ++ for (i = 0; i < XEN_HYPER_MAX_CPUS(); i++) { ++ if (note == xhdit->context_array[i].note) { ++ return &xhdit->context_array[i]; ++ } ++ } ++ return NULL; + } + + /* +@@ -922,166 +927,156 @@ xen_hyper_note_to_dumpinfo_context(ulong + * This assume that variable note has a top address of an area for + * specified type. + */ +-char * +-xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type) +-{ +- long size; +- ulong rp = note; +- +- if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE) +- size = xhdit->note_size; +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE) +- size = xhdit->core_size; +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE) +- size = xhdit->xen_core_size; +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M) +- size = xhdit->core_size + xhdit->xen_core_size; +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS) +- size = XEN_HYPER_SIZE(ELF_Prstatus); +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS) +- size = XEN_HYPER_SIZE(xen_crash_xen_regs_t); +- else +- return NULL; +- +- if (!readmem(rp, KVADDR, note_buf, size, +- "note_buf_t or crash_note_t", RETURN_ON_ERROR)) { +- if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE) +- error(WARNING, "cannot fill note_buf_t or crash_note_t.\n"); +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE) +- error(WARNING, "cannot fill note core.\n"); +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE) +- error(WARNING, "cannot fill note xen core.\n"); +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M) +- error(WARNING, "cannot fill note core & xen core.\n"); +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS) +- error(WARNING, "cannot fill ELF_Prstatus.\n"); +- else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS) +- error(WARNING, "cannot fill xen_crash_xen_regs_t.\n"); +- return NULL; +- } +- return note_buf; ++char *xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type) { ++ long size; ++ ulong rp = note; ++ ++ if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE) ++ size = xhdit->note_size; ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE) ++ size = xhdit->core_size; ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE) ++ size = xhdit->xen_core_size; ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M) ++ size = xhdit->core_size + xhdit->xen_core_size; ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS) ++ size = XEN_HYPER_SIZE(ELF_Prstatus); ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS) ++ size = XEN_HYPER_SIZE(xen_crash_xen_regs_t); ++ else ++ return NULL; ++ ++ if (!readmem(rp, KVADDR, note_buf, size, "note_buf_t or crash_note_t", ++ RETURN_ON_ERROR)) { ++ if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE) ++ error(WARNING, "cannot fill note_buf_t or crash_note_t.\n"); ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE) ++ error(WARNING, "cannot fill note core.\n"); ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE) ++ error(WARNING, "cannot fill note xen core.\n"); ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M) ++ error(WARNING, "cannot fill note core & xen core.\n"); ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS) ++ error(WARNING, "cannot fill ELF_Prstatus.\n"); ++ else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS) ++ error(WARNING, "cannot fill 
xen_crash_xen_regs_t.\n"); ++ return NULL; ++ } ++ return note_buf; + } + +- +- + /* + * Get domain status. + */ +-ulong +-xen_hyper_domain_state(struct xen_hyper_domain_context *dc) +-{ +- if (ACTIVE()) { +- if (xen_hyper_read_domain_verify(dc->domain) == NULL) { +- return XEN_HYPER_DOMF_ERROR; +- } +- } +- return dc->domain_flags; ++ulong xen_hyper_domain_state(struct xen_hyper_domain_context *dc) { ++ if (ACTIVE()) { ++ if (xen_hyper_read_domain_verify(dc->domain) == NULL) { ++ return XEN_HYPER_DOMF_ERROR; ++ } ++ } ++ return dc->domain_flags; + } + + /* + * Allocate domain context space. + */ +-void +-xen_hyper_refresh_domain_context_space(void) +-{ +- char *domain_struct; +- ulong domain, next, dom_xen, dom_io, idle_vcpu; +- struct xen_hyper_domain_context *dc; +- struct xen_hyper_domain_context *dom0; +- int i; +- +- if ((xhdt->flags & XEN_HYPER_DOMAIN_F_INIT) && !ACTIVE()) { +- return; +- } +- +- XEN_HYPER_RUNNING_DOMAINS() = XEN_HYPER_NR_DOMAINS() = +- xen_hyper_get_domains(); +- xen_hyper_alloc_domain_context_space(XEN_HYPER_NR_DOMAINS()); +- +- dc = xhdt->context_array; +- +- /* restore an dom_io context. */ +- get_symbol_data("dom_io", sizeof(dom_io), &dom_io); +- if ((domain_struct = xen_hyper_read_domain(dom_io)) == NULL) { +- error(FATAL, "cannot read dom_io.\n"); +- } +- xen_hyper_store_domain_context(dc, dom_io, domain_struct); +- xhdt->dom_io = dc; +- dc++; +- +- /* restore an dom_xen context. */ +- get_symbol_data("dom_xen", sizeof(dom_xen), &dom_xen); +- if ((domain_struct = xen_hyper_read_domain(dom_xen)) == NULL) { +- error(FATAL, "cannot read dom_xen.\n"); +- } +- xen_hyper_store_domain_context(dc, dom_xen, domain_struct); +- xhdt->dom_xen = dc; +- dc++; +- +- /* restore an idle domain context. */ +- for (i = 0; i < xht->idle_vcpu_size; i += XEN_HYPER_MAX_VIRT_CPUS) { +- idle_vcpu = xht->idle_vcpu_array[i]; +- if (idle_vcpu == 0) +- break; +- if (!readmem(idle_vcpu + MEMBER_OFFSET("vcpu", "domain"), +- KVADDR, &domain, sizeof(domain), "domain", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read domain member in vcpu.\n"); +- } +- if (CRASHDEBUG(1)) { +- fprintf(fp, "idle_vcpu=%lx, domain=%lx\n", idle_vcpu, domain); +- } +- if ((domain_struct = xen_hyper_read_domain(domain)) == NULL) { +- error(FATAL, "cannot read idle domain.\n"); +- } +- xen_hyper_store_domain_context(dc, domain, domain_struct); +- if (i == 0) +- xhdt->idle_domain = dc; +- dc++; +- } +- +- /* restore domain contexts from dom0 symbol. */ +- xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_DOM0, &next); +- domain = next; +- dom0 = dc; +- while((domain_struct = +- xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_NEXT, &next)) != NULL) { +- xen_hyper_store_domain_context(dc, domain, domain_struct); +- domain = next; +- dc++; +- } +- xhdt->dom0 = dom0; ++void xen_hyper_refresh_domain_context_space(void) { ++ char *domain_struct; ++ ulong domain, next, dom_xen, dom_io, idle_vcpu; ++ struct xen_hyper_domain_context *dc; ++ struct xen_hyper_domain_context *dom0; ++ int i; ++ ++ if ((xhdt->flags & XEN_HYPER_DOMAIN_F_INIT) && !ACTIVE()) { ++ return; ++ } ++ ++ XEN_HYPER_RUNNING_DOMAINS() = XEN_HYPER_NR_DOMAINS() = ++ xen_hyper_get_domains(); ++ xen_hyper_alloc_domain_context_space(XEN_HYPER_NR_DOMAINS()); ++ ++ dc = xhdt->context_array; ++ ++ /* restore an dom_io context. 
*/ ++ get_symbol_data("dom_io", sizeof(dom_io), &dom_io); ++ if ((domain_struct = xen_hyper_read_domain(dom_io)) == NULL) { ++ error(FATAL, "cannot read dom_io.\n"); ++ } ++ xen_hyper_store_domain_context(dc, dom_io, domain_struct); ++ xhdt->dom_io = dc; ++ dc++; ++ ++ /* restore an dom_xen context. */ ++ get_symbol_data("dom_xen", sizeof(dom_xen), &dom_xen); ++ if ((domain_struct = xen_hyper_read_domain(dom_xen)) == NULL) { ++ error(FATAL, "cannot read dom_xen.\n"); ++ } ++ xen_hyper_store_domain_context(dc, dom_xen, domain_struct); ++ xhdt->dom_xen = dc; ++ dc++; ++ ++ /* restore an idle domain context. */ ++ for (i = 0; i < xht->idle_vcpu_size; i += XEN_HYPER_MAX_VIRT_CPUS) { ++ idle_vcpu = xht->idle_vcpu_array[i]; ++ if (idle_vcpu == 0) ++ break; ++ if (!readmem(idle_vcpu + MEMBER_OFFSET("vcpu", "domain"), KVADDR, &domain, ++ sizeof(domain), "domain", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read domain member in vcpu.\n"); ++ } ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "idle_vcpu=%lx, domain=%lx\n", idle_vcpu, domain); ++ } ++ if ((domain_struct = xen_hyper_read_domain(domain)) == NULL) { ++ error(FATAL, "cannot read idle domain.\n"); ++ } ++ xen_hyper_store_domain_context(dc, domain, domain_struct); ++ if (i == 0) ++ xhdt->idle_domain = dc; ++ dc++; ++ } ++ ++ /* restore domain contexts from dom0 symbol. */ ++ xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_DOM0, &next); ++ domain = next; ++ dom0 = dc; ++ while ((domain_struct = xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_NEXT, ++ &next)) != NULL) { ++ xen_hyper_store_domain_context(dc, domain, domain_struct); ++ domain = next; ++ dc++; ++ } ++ xhdt->dom0 = dom0; + } + + /* + * Get number of domain. + */ +-int +-xen_hyper_get_domains(void) +-{ +- ulong domain, next_in_list; +- long domain_next_in_list; +- int i, j; +- +- if (!try_get_symbol_data("hardware_domain", sizeof(void *), &domain)) +- get_symbol_data("dom0", sizeof(void *), &domain); +- +- domain_next_in_list = MEMBER_OFFSET("domain", "next_in_list"); +- i = 0; +- while (domain != 0) { +- i++; +- next_in_list = domain + domain_next_in_list; +- if (!readmem(next_in_list, KVADDR, &domain, sizeof(void *), +- "domain.next_in_list", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read domain.next_in_list.\n"); +- } +- } +- i += 2; /* for dom_io, dom_xen */ +- /* for idle domains */ +- for (j = 0; j < xht->idle_vcpu_size; j += XEN_HYPER_MAX_VIRT_CPUS) { +- if (xht->idle_vcpu_array[j]) +- i++; +- } +- return i; ++int xen_hyper_get_domains(void) { ++ ulong domain, next_in_list; ++ long domain_next_in_list; ++ int i, j; ++ ++ if (!try_get_symbol_data("hardware_domain", sizeof(void *), &domain)) ++ get_symbol_data("dom0", sizeof(void *), &domain); ++ ++ domain_next_in_list = MEMBER_OFFSET("domain", "next_in_list"); ++ i = 0; ++ while (domain != 0) { ++ i++; ++ next_in_list = domain + domain_next_in_list; ++ if (!readmem(next_in_list, KVADDR, &domain, sizeof(void *), ++ "domain.next_in_list", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read domain.next_in_list.\n"); ++ } ++ } ++ i += 2; /* for dom_io, dom_xen */ ++ /* for idle domains */ ++ for (j = 0; j < xht->idle_vcpu_size; j += XEN_HYPER_MAX_VIRT_CPUS) { ++ if (xht->idle_vcpu_array[j]) ++ i++; ++ } ++ return i; + } + + /* +@@ -1090,172 +1085,156 @@ xen_hyper_get_domains(void) + * - XEN_HYPER_DOMAIN_READ_INIT:start from xhdt->context_array + * - XEN_HYPER_DOMAIN_READ_NEXT:next + */ +-char * +-xen_hyper_get_domain_next(int mod, ulong *next) +-{ +- static int idx = 0; +- +- char *domain_struct; +- struct xen_hyper_domain_context *dc; 
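The hunks around this point re-indent xen_hyper_get_domains(), which counts domains by taking the dom0 (or hardware_domain) symbol value as a target-space address and then chasing domain.next_in_list pointers out of the dump with readmem() rather than dereferencing them. Below is a minimal, self-contained sketch of that counting walk; it is not part of the patch, the struct layout and read_ptr() helper are invented stand-ins (read_ptr() plays the role of crash's readmem(..., KVADDR, ...)), and the example list lives in ordinary memory so the sketch compiles and runs on its own.

#include <stddef.h>
#include <stdio.h>

struct domain {                         /* stand-in for Xen's struct domain */
	unsigned short domain_id;
	struct domain *next_in_list;
};

/* simulated readmem(): fetch one pointer-sized value from the "dump" */
static int read_ptr(const struct domain *addr, size_t next_offset,
		    struct domain **out)
{
	*out = *(struct domain *const *)((const char *)addr + next_offset);
	return 1;                       /* readmem() returns FALSE on error */
}

int main(void)
{
	struct domain d2 = { 2, NULL };
	struct domain d1 = { 1, &d2 };
	struct domain d0 = { 0, &d1 };  /* plays the part of the dom0 symbol */
	size_t next_offset = offsetof(struct domain, next_in_list);
	struct domain *domain = &d0;
	int count = 0;

	/* same loop shape as xen_hyper_get_domains() above */
	while (domain != NULL) {
		count++;
		if (!read_ptr(domain, next_offset, &domain))
			return 1;
	}
	printf("%d domains on the list\n", count);      /* prints 3 */
	return 0;
}

The real routine then adds two more for dom_io and dom_xen plus one per populated idle-vcpu slot before the total is used to size the domain context array.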
+- +- switch (mod) { +- case XEN_HYPER_DOMAIN_READ_DOM0: +- /* Case of search from dom0 symbol. */ +- idx = 0; +- if (xhdt->dom0) { +- *next = xhdt->dom0->domain; +- } else { +- if (!try_get_symbol_data("hardware_domain", sizeof(void *), next)) +- get_symbol_data("dom0", sizeof(void *), next); +- } +- return xhdt->domain_struct; +- break; +- case XEN_HYPER_DOMAIN_READ_INIT: +- /* Case of search from context_array. */ +- if (xhdt->context_array && xhdt->context_array->domain) { +- idx = 1; /* this has a next index. */ +- *next = xhdt->context_array->domain; +- } else { +- idx = 0; +- *next = 0; +- return NULL; +- } +- return xhdt->domain_struct; +- break; +- case XEN_HYPER_DOMAIN_READ_NEXT: +- break; +- default : +- error(FATAL, "xen_hyper_get_domain_next mod error: %d\n", mod); +- return NULL; +- } +- +- /* Finished search */ +- if (!*next) { +- return NULL; +- } +- +- domain_struct = NULL; +- /* Is domain context array valid? */ +- if (idx) { +- if ((domain_struct = +- xen_hyper_read_domain(*next)) == NULL) { +- error(FATAL, "cannot get next domain from domain context array.\n"); +- } +- if (idx > XEN_HYPER_NR_DOMAINS()) { +- *next = 0; +- } else { +- dc = xhdt->context_array; +- dc += idx; +- *next = dc->domain; +- idx++; +- } +- return domain_struct; +- } +- +- /* Search from dom0 symbol. */ +- if ((domain_struct = +- xen_hyper_read_domain(*next)) == NULL) { +- error(FATAL, "cannot get next domain from dom0 symbol.\n"); +- } +- *next = ULONG(domain_struct + XEN_HYPER_OFFSET(domain_next_in_list)); +- return domain_struct; ++char *xen_hyper_get_domain_next(int mod, ulong *next) { ++ static int idx = 0; ++ ++ char *domain_struct; ++ struct xen_hyper_domain_context *dc; ++ ++ switch (mod) { ++ case XEN_HYPER_DOMAIN_READ_DOM0: ++ /* Case of search from dom0 symbol. */ ++ idx = 0; ++ if (xhdt->dom0) { ++ *next = xhdt->dom0->domain; ++ } else { ++ if (!try_get_symbol_data("hardware_domain", sizeof(void *), next)) ++ get_symbol_data("dom0", sizeof(void *), next); ++ } ++ return xhdt->domain_struct; ++ break; ++ case XEN_HYPER_DOMAIN_READ_INIT: ++ /* Case of search from context_array. */ ++ if (xhdt->context_array && xhdt->context_array->domain) { ++ idx = 1; /* this has a next index. */ ++ *next = xhdt->context_array->domain; ++ } else { ++ idx = 0; ++ *next = 0; ++ return NULL; ++ } ++ return xhdt->domain_struct; ++ break; ++ case XEN_HYPER_DOMAIN_READ_NEXT: ++ break; ++ default: ++ error(FATAL, "xen_hyper_get_domain_next mod error: %d\n", mod); ++ return NULL; ++ } ++ ++ /* Finished search */ ++ if (!*next) { ++ return NULL; ++ } ++ ++ domain_struct = NULL; ++ /* Is domain context array valid? */ ++ if (idx) { ++ if ((domain_struct = xen_hyper_read_domain(*next)) == NULL) { ++ error(FATAL, "cannot get next domain from domain context array.\n"); ++ } ++ if (idx > XEN_HYPER_NR_DOMAINS()) { ++ *next = 0; ++ } else { ++ dc = xhdt->context_array; ++ dc += idx; ++ *next = dc->domain; ++ idx++; ++ } ++ return domain_struct; ++ } ++ ++ /* Search from dom0 symbol. */ ++ if ((domain_struct = xen_hyper_read_domain(*next)) == NULL) { ++ error(FATAL, "cannot get next domain from dom0 symbol.\n"); ++ } ++ *next = ULONG(domain_struct + XEN_HYPER_OFFSET(domain_next_in_list)); ++ return domain_struct; + } + + /* + * from domain address to id. + */ +-domid_t +-xen_hyper_domain_to_id(ulong domain) +-{ +- struct xen_hyper_domain_context *dc; +- +- /* Is domain context array valid? 
*/ +- if (xhdt->context_array && xhdt->context_array->domain) { +- if ((dc = xen_hyper_domain_to_domain_context(domain)) == NULL) { +- return XEN_HYPER_DOMAIN_ID_INVALID; +- } else { +- return dc->domain_id; +- } +- } else { +- return XEN_HYPER_DOMAIN_ID_INVALID; +- } ++domid_t xen_hyper_domain_to_id(ulong domain) { ++ struct xen_hyper_domain_context *dc; ++ ++ /* Is domain context array valid? */ ++ if (xhdt->context_array && xhdt->context_array->domain) { ++ if ((dc = xen_hyper_domain_to_domain_context(domain)) == NULL) { ++ return XEN_HYPER_DOMAIN_ID_INVALID; ++ } else { ++ return dc->domain_id; ++ } ++ } else { ++ return XEN_HYPER_DOMAIN_ID_INVALID; ++ } + } + + /* + * Get domain struct from id. + */ +-char * +-xen_hyper_id_to_domain_struct(domid_t id) +-{ +- char *domain_struct; +- struct xen_hyper_domain_context *dc; +- +- domain_struct = NULL; +- +- /* Is domain context array valid? */ +- if (xhdt->context_array && xhdt->context_array->domain) { +- if ((dc = xen_hyper_id_to_domain_context(id)) == NULL) { +- return NULL; +- } else { +- if ((domain_struct = +- xen_hyper_read_domain(dc->domain)) == NULL) { +- error(FATAL, "cannot get domain from domain context array with id.\n"); +- } +- return domain_struct; +- } +- } else { +- return NULL; +- } ++char *xen_hyper_id_to_domain_struct(domid_t id) { ++ char *domain_struct; ++ struct xen_hyper_domain_context *dc; ++ ++ domain_struct = NULL; ++ ++ /* Is domain context array valid? */ ++ if (xhdt->context_array && xhdt->context_array->domain) { ++ if ((dc = xen_hyper_id_to_domain_context(id)) == NULL) { ++ return NULL; ++ } else { ++ if ((domain_struct = xen_hyper_read_domain(dc->domain)) == NULL) { ++ error(FATAL, "cannot get domain from domain context array with id.\n"); ++ } ++ return domain_struct; ++ } ++ } else { ++ return NULL; ++ } + } + + /* + * Get domain context from domain address. + */ + struct xen_hyper_domain_context * +-xen_hyper_domain_to_domain_context(ulong domain) +-{ +- struct xen_hyper_domain_context *dc; +- int i; +- +- if (xhdt->context_array == NULL || +- xhdt->context_array->domain == 0) { +- return NULL; +- } +- if (!domain) { +- return NULL; +- } +- for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS(); +- i++, dc++) { +- if (domain == dc->domain) { +- return dc; +- } +- } +- return NULL; ++xen_hyper_domain_to_domain_context(ulong domain) { ++ struct xen_hyper_domain_context *dc; ++ int i; ++ ++ if (xhdt->context_array == NULL || xhdt->context_array->domain == 0) { ++ return NULL; ++ } ++ if (!domain) { ++ return NULL; ++ } ++ for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS(); i++, dc++) { ++ if (domain == dc->domain) { ++ return dc; ++ } ++ } ++ return NULL; + } + + /* + * Get domain context from domain id. 
+ */ +-struct xen_hyper_domain_context * +-xen_hyper_id_to_domain_context(domid_t id) +-{ +- struct xen_hyper_domain_context *dc; +- int i; +- +- if (xhdt->context_array == NULL || +- xhdt->context_array->domain == 0) { +- return NULL; +- } +- if (id == XEN_HYPER_DOMAIN_ID_INVALID) { +- return NULL; +- } +- for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS(); +- i++, dc++) { +- if (id == dc->domain_id) { +- return dc; +- } +- } +- return NULL; ++struct xen_hyper_domain_context *xen_hyper_id_to_domain_context(domid_t id) { ++ struct xen_hyper_domain_context *dc; ++ int i; ++ ++ if (xhdt->context_array == NULL || xhdt->context_array->domain == 0) { ++ return NULL; ++ } ++ if (id == XEN_HYPER_DOMAIN_ID_INVALID) { ++ return NULL; ++ } ++ for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS(); i++, dc++) { ++ if (id == dc->domain_id) { ++ return dc; ++ } ++ } ++ return NULL; + } + + /* +@@ -1263,508 +1242,452 @@ xen_hyper_id_to_domain_context(domid_t i + */ + struct xen_hyper_domain_context * + xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc, +- ulong domain, char *dp) +-{ +- char *vcpup; +- unsigned int max_vcpus; +- unsigned int i; +- +- dc->domain = domain; +- BCOPY((char *)(dp + XEN_HYPER_OFFSET(domain_domain_id)), +- &dc->domain_id, sizeof(domid_t)); +- dc->tot_pages = UINT(dp + XEN_HYPER_OFFSET(domain_tot_pages)); +- dc->max_pages = UINT(dp + XEN_HYPER_OFFSET(domain_max_pages)); +- dc->xenheap_pages = UINT(dp + XEN_HYPER_OFFSET(domain_xenheap_pages)); +- dc->shared_info = ULONG(dp + XEN_HYPER_OFFSET(domain_shared_info)); +- dc->sched_priv = ULONG(dp + XEN_HYPER_OFFSET(domain_sched_priv)); +- dc->next_in_list = ULONG(dp + XEN_HYPER_OFFSET(domain_next_in_list)); +- if (XEN_HYPER_VALID_MEMBER(domain_domain_flags)) +- dc->domain_flags = ULONG(dp + XEN_HYPER_OFFSET(domain_domain_flags)); +- else if (XEN_HYPER_VALID_MEMBER(domain_is_shut_down)) { +- dc->domain_flags = 0; +- if (XEN_HYPER_VALID_MEMBER(domain_is_hvm) && +- *(dp + XEN_HYPER_OFFSET(domain_is_hvm))) { +- dc->domain_flags |= XEN_HYPER_DOMS_HVM; +- } +- if (XEN_HYPER_VALID_MEMBER(domain_guest_type) && +- *(dp + XEN_HYPER_OFFSET(domain_guest_type))) { +- /* For now PVH and HVM are the same for crash. +- * and 0 is PV. 
+- */ +- dc->domain_flags |= XEN_HYPER_DOMS_HVM; +- } +- if (*(dp + XEN_HYPER_OFFSET(domain_is_privileged))) { +- dc->domain_flags |= XEN_HYPER_DOMS_privileged; +- } +- if (*(dp + XEN_HYPER_OFFSET(domain_debugger_attached))) { +- dc->domain_flags |= XEN_HYPER_DOMS_debugging; +- } +- if (XEN_HYPER_VALID_MEMBER(domain_is_polling) && +- *(dp + XEN_HYPER_OFFSET(domain_is_polling))) { +- dc->domain_flags |= XEN_HYPER_DOMS_polling; +- } +- if (XEN_HYPER_VALID_MEMBER(domain_is_paused_by_controller) && +- *(dp + XEN_HYPER_OFFSET(domain_is_paused_by_controller))) { +- dc->domain_flags |= XEN_HYPER_DOMS_ctrl_pause; +- } +- if (XEN_HYPER_VALID_MEMBER(domain_controller_pause_count) && +- *(dp + XEN_HYPER_OFFSET(domain_controller_pause_count))) { +- dc->domain_flags |= XEN_HYPER_DOMS_ctrl_pause; +- } +- if (*(dp + XEN_HYPER_OFFSET(domain_is_dying))) { +- dc->domain_flags |= XEN_HYPER_DOMS_dying; +- } +- if (*(dp + XEN_HYPER_OFFSET(domain_is_shutting_down))) { +- dc->domain_flags |= XEN_HYPER_DOMS_shuttingdown; +- } +- if (*(dp + XEN_HYPER_OFFSET(domain_is_shut_down))) { +- dc->domain_flags |= XEN_HYPER_DOMS_shutdown; +- } +- } else { +- dc->domain_flags = XEN_HYPER_DOMF_ERROR; +- } +- dc->evtchn = ULONG(dp + XEN_HYPER_OFFSET(domain_evtchn)); +- if (XEN_HYPER_VALID_MEMBER(domain_max_vcpus)) { +- max_vcpus = UINT(dp + XEN_HYPER_OFFSET(domain_max_vcpus)); +- } else if (XEN_HYPER_VALID_SIZE(domain_vcpu)) { +- max_vcpus = XEN_HYPER_SIZE(domain_vcpu) / sizeof(void *); +- } else { +- max_vcpus = XEN_HYPER_MAX_VIRT_CPUS; +- } +- if (!(dc->vcpu = malloc(sizeof(ulong) * max_vcpus))) { +- error(FATAL, "cannot malloc vcpu array (%d VCPUs).", +- max_vcpus); +- } +- if (MEMBER_TYPE("domain", "vcpu") == TYPE_CODE_ARRAY) +- vcpup = dp + XEN_HYPER_OFFSET(domain_vcpu); +- else { +- ulong vcpu_array = ULONG(dp + XEN_HYPER_OFFSET(domain_vcpu)); +- if (vcpu_array && max_vcpus) { +- if (!(vcpup = +- malloc(max_vcpus * sizeof(void *)))) { +- error(FATAL, "cannot malloc VCPU array for domain %lx.", +- domain); +- } +- if (!readmem(vcpu_array, KVADDR, +- vcpup, max_vcpus * sizeof(void*), +- "VCPU array", RETURN_ON_ERROR)) { +- error(FATAL, "cannot read VCPU array for domain %lx.", +- domain); +- } +- } else { +- vcpup = NULL; +- } +- } +- if (vcpup) { +- for (i = 0; i < max_vcpus; i++) { +- dc->vcpu[i] = ULONG(vcpup + i*sizeof(void *)); +- if (dc->vcpu[i]) XEN_HYPER_NR_VCPUS_IN_DOM(dc)++; +- } +- if (vcpup != dp + XEN_HYPER_OFFSET(domain_vcpu)) { +- free(vcpup); +- } +- } ++ ulong domain, char *dp) { ++ char *vcpup; ++ unsigned int max_vcpus; ++ unsigned int i; ++ ++ dc->domain = domain; ++ BCOPY((char *)(dp + XEN_HYPER_OFFSET(domain_domain_id)), &dc->domain_id, ++ sizeof(domid_t)); ++ dc->tot_pages = UINT(dp + XEN_HYPER_OFFSET(domain_tot_pages)); ++ dc->max_pages = UINT(dp + XEN_HYPER_OFFSET(domain_max_pages)); ++ dc->xenheap_pages = UINT(dp + XEN_HYPER_OFFSET(domain_xenheap_pages)); ++ dc->shared_info = ULONG(dp + XEN_HYPER_OFFSET(domain_shared_info)); ++ dc->sched_priv = ULONG(dp + XEN_HYPER_OFFSET(domain_sched_priv)); ++ dc->next_in_list = ULONG(dp + XEN_HYPER_OFFSET(domain_next_in_list)); ++ if (XEN_HYPER_VALID_MEMBER(domain_domain_flags)) ++ dc->domain_flags = ULONG(dp + XEN_HYPER_OFFSET(domain_domain_flags)); ++ else if (XEN_HYPER_VALID_MEMBER(domain_is_shut_down)) { ++ dc->domain_flags = 0; ++ if (XEN_HYPER_VALID_MEMBER(domain_is_hvm) && ++ *(dp + XEN_HYPER_OFFSET(domain_is_hvm))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_HVM; ++ } ++ if (XEN_HYPER_VALID_MEMBER(domain_guest_type) && ++ *(dp + 
XEN_HYPER_OFFSET(domain_guest_type))) { ++ /* For now PVH and HVM are the same for crash. ++ * and 0 is PV. ++ */ ++ dc->domain_flags |= XEN_HYPER_DOMS_HVM; ++ } ++ if (*(dp + XEN_HYPER_OFFSET(domain_is_privileged))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_privileged; ++ } ++ if (*(dp + XEN_HYPER_OFFSET(domain_debugger_attached))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_debugging; ++ } ++ if (XEN_HYPER_VALID_MEMBER(domain_is_polling) && ++ *(dp + XEN_HYPER_OFFSET(domain_is_polling))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_polling; ++ } ++ if (XEN_HYPER_VALID_MEMBER(domain_is_paused_by_controller) && ++ *(dp + XEN_HYPER_OFFSET(domain_is_paused_by_controller))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_ctrl_pause; ++ } ++ if (XEN_HYPER_VALID_MEMBER(domain_controller_pause_count) && ++ *(dp + XEN_HYPER_OFFSET(domain_controller_pause_count))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_ctrl_pause; ++ } ++ if (*(dp + XEN_HYPER_OFFSET(domain_is_dying))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_dying; ++ } ++ if (*(dp + XEN_HYPER_OFFSET(domain_is_shutting_down))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_shuttingdown; ++ } ++ if (*(dp + XEN_HYPER_OFFSET(domain_is_shut_down))) { ++ dc->domain_flags |= XEN_HYPER_DOMS_shutdown; ++ } ++ } else { ++ dc->domain_flags = XEN_HYPER_DOMF_ERROR; ++ } ++ dc->evtchn = ULONG(dp + XEN_HYPER_OFFSET(domain_evtchn)); ++ if (XEN_HYPER_VALID_MEMBER(domain_max_vcpus)) { ++ max_vcpus = UINT(dp + XEN_HYPER_OFFSET(domain_max_vcpus)); ++ } else if (XEN_HYPER_VALID_SIZE(domain_vcpu)) { ++ max_vcpus = XEN_HYPER_SIZE(domain_vcpu) / sizeof(void *); ++ } else { ++ max_vcpus = XEN_HYPER_MAX_VIRT_CPUS; ++ } ++ if (!(dc->vcpu = malloc(sizeof(ulong) * max_vcpus))) { ++ error(FATAL, "cannot malloc vcpu array (%d VCPUs).", max_vcpus); ++ } ++ if (MEMBER_TYPE("domain", "vcpu") == TYPE_CODE_ARRAY) ++ vcpup = dp + XEN_HYPER_OFFSET(domain_vcpu); ++ else { ++ ulong vcpu_array = ULONG(dp + XEN_HYPER_OFFSET(domain_vcpu)); ++ if (vcpu_array && max_vcpus) { ++ if (!(vcpup = malloc(max_vcpus * sizeof(void *)))) { ++ error(FATAL, "cannot malloc VCPU array for domain %lx.", domain); ++ } ++ if (!readmem(vcpu_array, KVADDR, vcpup, max_vcpus * sizeof(void *), ++ "VCPU array", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read VCPU array for domain %lx.", domain); ++ } ++ } else { ++ vcpup = NULL; ++ } ++ } ++ if (vcpup) { ++ for (i = 0; i < max_vcpus; i++) { ++ dc->vcpu[i] = ULONG(vcpup + i * sizeof(void *)); ++ if (dc->vcpu[i]) ++ XEN_HYPER_NR_VCPUS_IN_DOM(dc)++; ++ } ++ if (vcpup != dp + XEN_HYPER_OFFSET(domain_vcpu)) { ++ free(vcpup); ++ } ++ } + +- return dc; ++ return dc; + } + + /* + * Read domain struct from domain context. + */ +-char * +-xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc) +-{ +- return xen_hyper_fill_domain_struct(dc->domain, xhdt->domain_struct); ++char *xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc) { ++ return xen_hyper_fill_domain_struct(dc->domain, xhdt->domain_struct); + } + + /* + * Read domain struct. + */ +-char * +-xen_hyper_read_domain(ulong domain) +-{ +- return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct); ++char *xen_hyper_read_domain(ulong domain) { ++ return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct); + } + + /* + * Read domain struct to verification. 
+ */ +-char * +-xen_hyper_read_domain_verify(ulong domain) +-{ +- return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct_verify); ++char *xen_hyper_read_domain_verify(ulong domain) { ++ return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct_verify); + } + + /* + * Fill domain struct. + */ +-char * +-xen_hyper_fill_domain_struct(ulong domain, char *domain_struct) +-{ +- if (!readmem(domain, KVADDR, domain_struct, +- XEN_HYPER_SIZE(domain), "fill_domain_struct", +- ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { +- error(WARNING, "cannot fill domain struct.\n"); +- return NULL; +- } +- return domain_struct; ++char *xen_hyper_fill_domain_struct(ulong domain, char *domain_struct) { ++ if (!readmem(domain, KVADDR, domain_struct, XEN_HYPER_SIZE(domain), ++ "fill_domain_struct", ++ ACTIVE() ? (RETURN_ON_ERROR | QUIET) : RETURN_ON_ERROR)) { ++ error(WARNING, "cannot fill domain struct.\n"); ++ return NULL; ++ } ++ return domain_struct; + } + + /* + * Allocate domain context space. + */ +-void +-xen_hyper_alloc_domain_context_space(int domains) +-{ +- if (xhdt->context_array == NULL) { +- if (!(xhdt->context_array = +- malloc(domains * sizeof(struct xen_hyper_domain_context)))) { +- error(FATAL, "cannot malloc context array (%d domains).", +- domains); +- } +- xhdt->context_array_cnt = domains; +- } else if (domains > xhdt->context_array_cnt) { +- struct xen_hyper_domain_context *dc; +- int i; +- for (dc = xhdt->context_array, i = 0; +- i < xhdt->context_array_cnt; ++dc, ++i) { +- if (dc->vcpu) +- free(dc->vcpu); +- } +- if (!(xhdt->context_array = +- realloc(xhdt->context_array, +- domains * sizeof(struct xen_hyper_domain_context)))) { +- error(FATAL, "cannot realloc context array (%d domains).", +- domains); +- } +- xhdt->context_array_cnt = domains; +- } +- BZERO(xhdt->context_array, +- domains * sizeof(struct xen_hyper_domain_context)); ++void xen_hyper_alloc_domain_context_space(int domains) { ++ if (xhdt->context_array == NULL) { ++ if (!(xhdt->context_array = ++ malloc(domains * sizeof(struct xen_hyper_domain_context)))) { ++ error(FATAL, "cannot malloc context array (%d domains).", domains); ++ } ++ xhdt->context_array_cnt = domains; ++ } else if (domains > xhdt->context_array_cnt) { ++ struct xen_hyper_domain_context *dc; ++ int i; ++ for (dc = xhdt->context_array, i = 0; i < xhdt->context_array_cnt; ++ ++dc, ++i) { ++ if (dc->vcpu) ++ free(dc->vcpu); ++ } ++ if (!(xhdt->context_array = ++ realloc(xhdt->context_array, ++ domains * sizeof(struct xen_hyper_domain_context)))) { ++ error(FATAL, "cannot realloc context array (%d domains).", domains); ++ } ++ xhdt->context_array_cnt = domains; ++ } ++ BZERO(xhdt->context_array, domains * sizeof(struct xen_hyper_domain_context)); + } + +- +- + /* + * Get vcpu status. + */ +-int +-xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc) +-{ +- if (ACTIVE()) { +- if (xen_hyper_read_vcpu_verify(vcc->vcpu) == NULL) { +- return XEN_HYPER_RUNSTATE_ERROR; +- } +- } +- return vcc->state; ++int xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc) { ++ if (ACTIVE()) { ++ if (xen_hyper_read_vcpu_verify(vcc->vcpu) == NULL) { ++ return XEN_HYPER_RUNSTATE_ERROR; ++ } ++ } ++ return vcc->state; + } + + /* + * Allocate vcpu context space. 
+ */ +-void +-xen_hyper_refresh_vcpu_context_space(void) +-{ +- struct xen_hyper_domain_context *dc; +- struct xen_hyper_vcpu_context_array *vcca; +- struct xen_hyper_vcpu_context *vcc; +- int i, j; +- +- if ((xhvct->flags & XEN_HYPER_VCPU_F_INIT) && !ACTIVE()) { +- return; +- } +- +- xen_hyper_alloc_vcpu_context_arrays_space(XEN_HYPER_NR_DOMAINS()); +- for (i = 0, xht->vcpus = 0, dc = xhdt->context_array, +- vcca = xhvct->vcpu_context_arrays; +- i < XEN_HYPER_NR_DOMAINS(); i++, dc++, vcca++) { +- dc->vcpu_context_array = vcca; +- xen_hyper_alloc_vcpu_context_space(vcca, +- XEN_HYPER_NR_VCPUS_IN_DOM(dc)); +- for (j = 0, vcc = vcca->context_array; +- j < XEN_HYPER_NR_VCPUS_IN_DOM(dc); j++, vcc++) { +- xen_hyper_read_vcpu(dc->vcpu[j]); +- xen_hyper_store_vcpu_context(vcc, dc->vcpu[j], +- xhvct->vcpu_struct); +- } +- if (dc == xhdt->idle_domain) { +- xhvct->idle_vcpu_context_array = vcca; +- } +- xht->vcpus += vcca->context_array_cnt; +- } ++void xen_hyper_refresh_vcpu_context_space(void) { ++ struct xen_hyper_domain_context *dc; ++ struct xen_hyper_vcpu_context_array *vcca; ++ struct xen_hyper_vcpu_context *vcc; ++ int i, j; ++ ++ if ((xhvct->flags & XEN_HYPER_VCPU_F_INIT) && !ACTIVE()) { ++ return; ++ } ++ ++ xen_hyper_alloc_vcpu_context_arrays_space(XEN_HYPER_NR_DOMAINS()); ++ for (i = 0, xht->vcpus = 0, dc = xhdt->context_array, ++ vcca = xhvct->vcpu_context_arrays; ++ i < XEN_HYPER_NR_DOMAINS(); i++, dc++, vcca++) { ++ dc->vcpu_context_array = vcca; ++ xen_hyper_alloc_vcpu_context_space(vcca, XEN_HYPER_NR_VCPUS_IN_DOM(dc)); ++ for (j = 0, vcc = vcca->context_array; j < XEN_HYPER_NR_VCPUS_IN_DOM(dc); ++ j++, vcc++) { ++ xen_hyper_read_vcpu(dc->vcpu[j]); ++ xen_hyper_store_vcpu_context(vcc, dc->vcpu[j], xhvct->vcpu_struct); ++ } ++ if (dc == xhdt->idle_domain) { ++ xhvct->idle_vcpu_context_array = vcca; ++ } ++ xht->vcpus += vcca->context_array_cnt; ++ } + } + + /* + * Get vcpu context from vcpu address. + */ +-struct xen_hyper_vcpu_context * +-xen_hyper_vcpu_to_vcpu_context(ulong vcpu) +-{ +- struct xen_hyper_vcpu_context_array *vcca; +- struct xen_hyper_vcpu_context *vcc; +- int i, j; +- +- if (!vcpu) { +- return NULL; +- } +- for (i = 0, vcca = xhvct->vcpu_context_arrays; +- i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) { +- for (j = 0, vcc = vcca->context_array; +- j < vcca->context_array_cnt; j++, vcc++) { +- if (vcpu == vcc->vcpu) { +- return vcc; +- } +- } +- } +- return NULL; ++struct xen_hyper_vcpu_context *xen_hyper_vcpu_to_vcpu_context(ulong vcpu) { ++ struct xen_hyper_vcpu_context_array *vcca; ++ struct xen_hyper_vcpu_context *vcc; ++ int i, j; ++ ++ if (!vcpu) { ++ return NULL; ++ } ++ for (i = 0, vcca = xhvct->vcpu_context_arrays; ++ i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) { ++ for (j = 0, vcc = vcca->context_array; j < vcca->context_array_cnt; ++ j++, vcc++) { ++ if (vcpu == vcc->vcpu) { ++ return vcc; ++ } ++ } ++ } ++ return NULL; + } + + /* + * Get vcpu context. 
+ */ + struct xen_hyper_vcpu_context * +-xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid) +-{ +- struct xen_hyper_vcpu_context_array *vcca; +- struct xen_hyper_vcpu_context *vcc; +- int i; +- +- if (vcid == XEN_HYPER_VCPU_ID_INVALID) { +- return NULL; +- } +- if ((vcca = xen_hyper_domain_to_vcpu_context_array(domain))) { +- ; +- } else if (!(vcca = xen_hyper_domid_to_vcpu_context_array(did))) { +- return NULL; +- } +- for (i = 0, vcc = vcca->context_array; +- i < vcca->context_array_cnt; i++, vcc++) { +- if (vcid == vcc->vcpu_id) { +- return vcc; +- } +- } +- return NULL; ++xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid) { ++ struct xen_hyper_vcpu_context_array *vcca; ++ struct xen_hyper_vcpu_context *vcc; ++ int i; ++ ++ if (vcid == XEN_HYPER_VCPU_ID_INVALID) { ++ return NULL; ++ } ++ if ((vcca = xen_hyper_domain_to_vcpu_context_array(domain))) { ++ ; ++ } else if (!(vcca = xen_hyper_domid_to_vcpu_context_array(did))) { ++ return NULL; ++ } ++ for (i = 0, vcc = vcca->context_array; i < vcca->context_array_cnt; ++ i++, vcc++) { ++ if (vcid == vcc->vcpu_id) { ++ return vcc; ++ } ++ } ++ return NULL; + } + + /* + * Get pointer of a vcpu context array from domain address. + */ + struct xen_hyper_vcpu_context_array * +-xen_hyper_domain_to_vcpu_context_array(ulong domain) +-{ +- struct xen_hyper_domain_context *dc; +- +- if(!(dc = xen_hyper_domain_to_domain_context(domain))) { +- return NULL; +- } +- return dc->vcpu_context_array; ++xen_hyper_domain_to_vcpu_context_array(ulong domain) { ++ struct xen_hyper_domain_context *dc; ++ ++ if (!(dc = xen_hyper_domain_to_domain_context(domain))) { ++ return NULL; ++ } ++ return dc->vcpu_context_array; + } + + /* + * Get pointer of a vcpu context array from domain id. + */ + struct xen_hyper_vcpu_context_array * +-xen_hyper_domid_to_vcpu_context_array(domid_t id) +-{ +- struct xen_hyper_domain_context *dc; +- +- if (!(dc = xen_hyper_id_to_domain_context(id))) { +- return NULL; +- } +- return dc->vcpu_context_array; ++xen_hyper_domid_to_vcpu_context_array(domid_t id) { ++ struct xen_hyper_domain_context *dc; ++ ++ if (!(dc = xen_hyper_id_to_domain_context(id))) { ++ return NULL; ++ } ++ return dc->vcpu_context_array; + } + + /* + * Store vcpu struct contents. 
+  */
+ struct xen_hyper_vcpu_context *
+-xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc,
+-	ulong vcpu, char *vcp)
+-{
+-	vcc->vcpu = vcpu;
+-	vcc->vcpu_id = INT(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_id));
+-	vcc->processor = INT(vcp + XEN_HYPER_OFFSET(vcpu_processor));
+-	vcc->vcpu_info = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_info));
+-	vcc->domain = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_domain));
+-	vcc->next_in_list = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_next_in_list));
+-	if (XEN_HYPER_VALID_MEMBER(vcpu_sleep_tick))
+-		vcc->sleep_tick = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sleep_tick));
+-	vcc->sched_priv = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sched_priv));
+-	vcc->state = INT(vcp + XEN_HYPER_OFFSET(vcpu_runstate) +
+-		XEN_HYPER_OFFSET(vcpu_runstate_info_state));
+-	vcc->state_entry_time = ULONGLONG(vcp +
+-		XEN_HYPER_OFFSET(vcpu_runstate) +
+-		XEN_HYPER_OFFSET(vcpu_runstate_info_state_entry_time));
+-	vcc->runstate_guest = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_runstate_guest));
+-	if (XEN_HYPER_VALID_MEMBER(vcpu_vcpu_flags))
+-		vcc->vcpu_flags = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_flags));
+-	else
+-		vcc->vcpu_flags = XEN_HYPER_VCPUF_ERROR;
+-	return vcc;
++xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc, ulong vcpu,
++                             char *vcp) {
++  vcc->vcpu = vcpu;
++  vcc->vcpu_id = INT(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_id));
++  vcc->processor = INT(vcp + XEN_HYPER_OFFSET(vcpu_processor));
++  vcc->vcpu_info = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_info));
++  vcc->domain = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_domain));
++  vcc->next_in_list = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_next_in_list));
++  if (XEN_HYPER_VALID_MEMBER(vcpu_sleep_tick))
++    vcc->sleep_tick = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sleep_tick));
++  vcc->sched_priv = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sched_priv));
++  vcc->state = INT(vcp + XEN_HYPER_OFFSET(vcpu_runstate) +
++                   XEN_HYPER_OFFSET(vcpu_runstate_info_state));
++  vcc->state_entry_time =
++      ULONGLONG(vcp + XEN_HYPER_OFFSET(vcpu_runstate) +
++                XEN_HYPER_OFFSET(vcpu_runstate_info_state_entry_time));
++  vcc->runstate_guest = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_runstate_guest));
++  if (XEN_HYPER_VALID_MEMBER(vcpu_vcpu_flags))
++    vcc->vcpu_flags = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_flags));
++  else
++    vcc->vcpu_flags = XEN_HYPER_VCPUF_ERROR;
++  return vcc;
+ }
+
+ /*
+  * Read vcpu struct from vcpu context.
+  */
+-char *
+-xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc)
+-{
+-	return xen_hyper_fill_vcpu_struct(vcc->vcpu, xhvct->vcpu_struct);
++char *xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc) {
++  return xen_hyper_fill_vcpu_struct(vcc->vcpu, xhvct->vcpu_struct);
+ }
+
+ /*
+  * Read vcpu struct.
+  */
+-char *
+-xen_hyper_read_vcpu(ulong vcpu)
+-{
+-	return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct);
++char *xen_hyper_read_vcpu(ulong vcpu) {
++  return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct);
+ }
+
+ /*
+  * Read vcpu struct to verification.
+  */
+-char *
+-xen_hyper_read_vcpu_verify(ulong vcpu)
+-{
+-	return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct_verify);
++char *xen_hyper_read_vcpu_verify(ulong vcpu) {
++  return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct_verify);
+ }
+
+ /*
+  * Fill vcpu struct.
+  */
+-char *
+-xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct)
+-{
+-	if (!readmem(vcpu, KVADDR, vcpu_struct,
+-	    XEN_HYPER_SIZE(vcpu), "fill_vcpu_struct",
+-	    ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
+-		error(WARNING, "cannot fill vcpu struct.\n");
+-		return NULL;
+-	}
+-	return vcpu_struct;
++char *xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct) {
++  if (!readmem(vcpu, KVADDR, vcpu_struct, XEN_HYPER_SIZE(vcpu),
++               "fill_vcpu_struct",
++               ACTIVE() ? (RETURN_ON_ERROR | QUIET) : RETURN_ON_ERROR)) {
++    error(WARNING, "cannot fill vcpu struct.\n");
++    return NULL;
++  }
++  return vcpu_struct;
+ }
+
+ /*
+  * Allocate vcpu context arrays space.
+  */
+-void
+-xen_hyper_alloc_vcpu_context_arrays_space(int domains)
+-{
+-	struct xen_hyper_vcpu_context_array *vcca;
+-
+-	if (xhvct->vcpu_context_arrays == NULL) {
+-		if (!(xhvct->vcpu_context_arrays =
+-			malloc(domains * sizeof(struct xen_hyper_vcpu_context_array)))) {
+-			error(FATAL, "cannot malloc context arrays (%d domains).",
+-				domains);
+-		}
+-		BZERO(xhvct->vcpu_context_arrays, domains * sizeof(struct xen_hyper_vcpu_context_array));
+-		xhvct->vcpu_context_arrays_cnt = domains;
+-	} else if (domains > xhvct->vcpu_context_arrays_cnt) {
+-		if (!(xhvct->vcpu_context_arrays =
+-			realloc(xhvct->vcpu_context_arrays,
+-			domains * sizeof(struct xen_hyper_vcpu_context_array)))) {
+-			error(FATAL, "cannot realloc context arrays (%d domains).",
+-				domains);
+-		}
+-		vcca = xhvct->vcpu_context_arrays + domains;
+-		BZERO(vcca, (domains - xhvct->vcpu_context_arrays_cnt) *
+-			sizeof(struct xen_hyper_vcpu_context_array));
+-		xhvct->vcpu_context_arrays_cnt = domains;
+-	}
++void xen_hyper_alloc_vcpu_context_arrays_space(int domains) {
++  struct xen_hyper_vcpu_context_array *vcca;
++
++  if (xhvct->vcpu_context_arrays == NULL) {
++    if (!(xhvct->vcpu_context_arrays =
++              malloc(domains * sizeof(struct xen_hyper_vcpu_context_array)))) {
++      error(FATAL, "cannot malloc context arrays (%d domains).", domains);
++    }
++    BZERO(xhvct->vcpu_context_arrays,
++          domains * sizeof(struct xen_hyper_vcpu_context_array));
++    xhvct->vcpu_context_arrays_cnt = domains;
++  } else if (domains > xhvct->vcpu_context_arrays_cnt) {
++    if (!(xhvct->vcpu_context_arrays =
++              realloc(xhvct->vcpu_context_arrays,
++                      domains * sizeof(struct xen_hyper_vcpu_context_array)))) {
++      error(FATAL, "cannot realloc context arrays (%d domains).", domains);
++    }
++    vcca = xhvct->vcpu_context_arrays + domains;
++    BZERO(vcca, (domains - xhvct->vcpu_context_arrays_cnt) *
++                    sizeof(struct xen_hyper_vcpu_context_array));
++    xhvct->vcpu_context_arrays_cnt = domains;
++  }
+ }
+
+ /*
+  * Allocate vcpu context space.
+  */
+-void
+-xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus)
+-{
+-	if (!vcpus) {
+-		if (vcca->context_array != NULL) {
+-			free(vcca->context_array);
+-			vcca->context_array = NULL;
+-		}
+-		vcca->context_array_cnt = vcpus;
+-	} else if (vcca->context_array == NULL) {
+-		if (!(vcca->context_array =
+-			malloc(vcpus * sizeof(struct xen_hyper_vcpu_context)))) {
+-			error(FATAL, "cannot malloc context array (%d vcpus).",
+-				vcpus);
+-		}
+-		vcca->context_array_cnt = vcpus;
+-	} else if (vcpus > vcca->context_array_cnt) {
+-		if (!(vcca->context_array =
+-			realloc(vcca->context_array,
+-			vcpus * sizeof(struct xen_hyper_vcpu_context_array)))) {
+-			error(FATAL, "cannot realloc context array (%d vcpus).",
+-				vcpus);
+-		}
+-		vcca->context_array_cnt = vcpus;
+-	}
+-	vcca->context_array_valid = vcpus;
+-	BZERO(vcca->context_array, vcpus * sizeof(struct xen_hyper_vcpu_context));
++void xen_hyper_alloc_vcpu_context_space(
++    struct xen_hyper_vcpu_context_array *vcca, int vcpus) {
++  if (!vcpus) {
++    if (vcca->context_array != NULL) {
++      free(vcca->context_array);
++      vcca->context_array = NULL;
++    }
++    vcca->context_array_cnt = vcpus;
++  } else if (vcca->context_array == NULL) {
++    if (!(vcca->context_array =
++              malloc(vcpus * sizeof(struct xen_hyper_vcpu_context)))) {
++      error(FATAL, "cannot malloc context array (%d vcpus).", vcpus);
++    }
++    vcca->context_array_cnt = vcpus;
++  } else if (vcpus > vcca->context_array_cnt) {
++    if (!(vcca->context_array =
++              realloc(vcca->context_array,
++                      vcpus * sizeof(struct xen_hyper_vcpu_context_array)))) {
++      error(FATAL, "cannot realloc context array (%d vcpus).", vcpus);
++    }
++    vcca->context_array_cnt = vcpus;
++  }
++  vcca->context_array_valid = vcpus;
++  BZERO(vcca->context_array, vcpus * sizeof(struct xen_hyper_vcpu_context));
+ }
+
+-
+-
+ /*
+  * Get pcpu context from pcpu id.
+  */
+-struct xen_hyper_pcpu_context *
+-xen_hyper_id_to_pcpu_context(uint id)
+-{
+-	if (xhpct->context_array == NULL) {
+-		return NULL;
+-	}
+-	if (!xen_hyper_test_pcpu_id(id)) {
+-		return NULL;
+-	}
+-	return &xhpct->context_array[id];
++struct xen_hyper_pcpu_context *xen_hyper_id_to_pcpu_context(uint id) {
++  if (xhpct->context_array == NULL) {
++    return NULL;
++  }
++  if (!xen_hyper_test_pcpu_id(id)) {
++    return NULL;
++  }
++  return &xhpct->context_array[id];
+ }
+
+ /*
+  * Get pcpu context from pcpu address.
+  */
+-struct xen_hyper_pcpu_context *
+-xen_hyper_pcpu_to_pcpu_context(ulong pcpu)
+-{
+-	struct xen_hyper_pcpu_context *pcc;
+-	int i;
+-	uint cpuid;
+-
+-	if (xhpct->context_array == NULL) {
+-		return NULL;
+-	}
+-	if (!pcpu) {
+-		return NULL;
+-	}
+-	for_cpu_indexes(i, cpuid)
+-	{
+-		pcc = &xhpct->context_array[cpuid];
+-		if (pcpu == pcc->pcpu) {
+-			return pcc;
+-		}
+-	}
+-	return NULL;
++struct xen_hyper_pcpu_context *xen_hyper_pcpu_to_pcpu_context(ulong pcpu) {
++  struct xen_hyper_pcpu_context *pcc;
++  int i;
++  uint cpuid;
++
++  if (xhpct->context_array == NULL) {
++    return NULL;
++  }
++  if (!pcpu) {
++    return NULL;
++  }
++  for_cpu_indexes(i, cpuid) {
++    pcc = &xhpct->context_array[cpuid];
++    if (pcpu == pcc->pcpu) {
++      return pcc;
++    }
++  }
++  return NULL;
+ }
+
+ /*
+  * Store pcpu struct contents.
+  */
+ struct xen_hyper_pcpu_context *
+-xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc,
+-	ulong pcpu, char *pcp)
+-{
+-	pcc->pcpu = pcpu;
+-	pcc->processor_id =
+-		UINT(pcp + XEN_HYPER_OFFSET(cpu_info_processor_id));
+-	pcc->guest_cpu_user_regs = (ulong)(pcpu +
+-		XEN_HYPER_OFFSET(cpu_info_guest_cpu_user_regs));
+-	pcc->current_vcpu =
+-		ULONG(pcp + XEN_HYPER_OFFSET(cpu_info_current_vcpu));
+-	return pcc;
++xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc, ulong pcpu,
++                             char *pcp) {
++  pcc->pcpu = pcpu;
++  pcc->processor_id = UINT(pcp + XEN_HYPER_OFFSET(cpu_info_processor_id));
++  pcc->guest_cpu_user_regs =
++      (ulong)(pcpu + XEN_HYPER_OFFSET(cpu_info_guest_cpu_user_regs));
++  pcc->current_vcpu = ULONG(pcp + XEN_HYPER_OFFSET(cpu_info_current_vcpu));
++  return pcc;
+ }
+
+ /*
+@@ -1772,404 +1695,355 @@ xen_hyper_store_pcpu_context(struct xen_
+  */
+ struct xen_hyper_pcpu_context *
+ xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc,
+-	ulong init_tss, char *tss)
+-{
+-	int i;
+-	uint64_t *ist_p;
+-
+-	pcc->init_tss = init_tss;
+-	if (machine_type("X86")) {
+-		pcc->sp.esp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_esp0));
+-	} else if (machine_type("X86_64")) {
+-		pcc->sp.rsp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_rsp0));
+-		ist_p = (uint64_t *)(tss + XEN_HYPER_OFFSET(tss_ist));
+-		for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++, ist_p++) {
+-			pcc->ist[i] = ULONG(ist_p);
+-		}
+-	}
+-	return pcc;
++                                 ulong init_tss, char *tss) {
++  int i;
++  uint64_t *ist_p;
++
++  pcc->init_tss = init_tss;
++  if (machine_type("X86")) {
++    pcc->sp.esp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_esp0));
++  } else if (machine_type("X86_64")) {
++    pcc->sp.rsp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_rsp0));
++    ist_p = (uint64_t *)(tss + XEN_HYPER_OFFSET(tss_ist));
++    for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++, ist_p++) {
++      pcc->ist[i] = ULONG(ist_p);
++    }
++  }
++  return pcc;
+ }
+
+ /*
+  * Read pcpu struct.
+  */
+-char *
+-xen_hyper_read_pcpu(ulong pcpu)
+-{
+-	return xen_hyper_fill_pcpu_struct(pcpu, xhpct->pcpu_struct);
++char *xen_hyper_read_pcpu(ulong pcpu) {
++  return xen_hyper_fill_pcpu_struct(pcpu, xhpct->pcpu_struct);
+ }
+
+ /*
+  * Fill pcpu struct.
+  */
+-char *
+-xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct)
+-{
+-	if (!readmem(pcpu, KVADDR, pcpu_struct,
+-	    XEN_HYPER_SIZE(cpu_info), "fill_pcpu_struct",
+-	    ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
+-		error(WARNING, "cannot fill pcpu_struct.\n");
+-		return NULL;
+-	}
+-	return pcpu_struct;
++char *xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct) {
++  if (!readmem(pcpu, KVADDR, pcpu_struct, XEN_HYPER_SIZE(cpu_info),
++               "fill_pcpu_struct",
++               ACTIVE() ? (RETURN_ON_ERROR | QUIET) : RETURN_ON_ERROR)) {
++    error(WARNING, "cannot fill pcpu_struct.\n");
++    return NULL;
++  }
++  return pcpu_struct;
+ }
+
+ /*
+  * Allocate pcpu context space.
+  */
+-void
+-xen_hyper_alloc_pcpu_context_space(int pcpus)
+-{
+-	if (xhpct->context_array == NULL) {
+-		if (!(xhpct->context_array =
+-			malloc(pcpus * sizeof(struct xen_hyper_pcpu_context)))) {
+-			error(FATAL, "cannot malloc context array (%d pcpus).",
+-				pcpus);
+-		}
+-	}
+-	BZERO(xhpct->context_array, pcpus * sizeof(struct xen_hyper_pcpu_context));
++void xen_hyper_alloc_pcpu_context_space(int pcpus) {
++  if (xhpct->context_array == NULL) {
++    if (!(xhpct->context_array =
++              malloc(pcpus * sizeof(struct xen_hyper_pcpu_context)))) {
++      error(FATAL, "cannot malloc context array (%d pcpus).", pcpus);
++    }
++  }
++  BZERO(xhpct->context_array, pcpus * sizeof(struct xen_hyper_pcpu_context));
+ }
+
+-
+-
+ /*
+  * Fill cpu_data.
+  */
+-char *
+-xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86)
+-{
+-	ulong cpu_data;
+-
+-	if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address)
+-		return NULL;
+-	cpu_data = xht->cpu_data_address + XEN_HYPER_SIZE(cpuinfo_x86) * idx;
+-	if (!readmem(cpu_data, KVADDR, cpuinfo_x86, XEN_HYPER_SIZE(cpuinfo_x86),
+-		"cpu_data", RETURN_ON_ERROR)) {
+-		error(WARNING, "cannot read cpu_data.\n");
+-		return NULL;
+-	}
+-	return cpuinfo_x86;
+-}
+-
+-char *
+-xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64)
+-{
+-	ulong cpu_data;
+-
+-	if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address)
+-		return NULL;
+-	cpu_data = xen_hyper_per_cpu(xht->cpu_data_address, idx);
+-	if (!readmem(cpu_data, KVADDR, cpuinfo_ia64, XEN_HYPER_SIZE(cpuinfo_ia64),
+-		"cpu_data", RETURN_ON_ERROR)) {
+-		error(WARNING, "cannot read cpu_data.\n");
+-		return NULL;
+-	}
+-	return cpuinfo_ia64;
++char *xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86) {
++  ulong cpu_data;
++
++  if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address)
++    return NULL;
++  cpu_data = xht->cpu_data_address + XEN_HYPER_SIZE(cpuinfo_x86) * idx;
++  if (!readmem(cpu_data, KVADDR, cpuinfo_x86, XEN_HYPER_SIZE(cpuinfo_x86),
++               "cpu_data", RETURN_ON_ERROR)) {
++    error(WARNING, "cannot read cpu_data.\n");
++    return NULL;
++  }
++  return cpuinfo_x86;
++}
++
++char *xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64) {
++  ulong cpu_data;
++
++  if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address)
++    return NULL;
++  cpu_data = xen_hyper_per_cpu(xht->cpu_data_address, idx);
++  if (!readmem(cpu_data, KVADDR, cpuinfo_ia64, XEN_HYPER_SIZE(cpuinfo_ia64),
++               "cpu_data", RETURN_ON_ERROR)) {
++    error(WARNING, "cannot read cpu_data.\n");
++    return NULL;
++  }
++  return cpuinfo_ia64;
+ }
+
+ /*
+  * Return whether vcpu is crashing.
+  */
+-int
+-xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc)
+-{
+-	if (vcc == xht->crashing_vcc)
+-		return TRUE;
+-	return FALSE;
++int xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc) {
++  if (vcc == xht->crashing_vcc)
++    return TRUE;
++  return FALSE;
+ }
+
+ /*
+  * Test whether cpu for pcpu id exists.
+  */
+-int
+-xen_hyper_test_pcpu_id(uint pcpu_id)
+-{
+-	ulong *cpumask = xht->cpumask;
+-	uint i, j;
+-
+-	if (pcpu_id == XEN_HYPER_PCPU_ID_INVALID ||
+-	    pcpu_id > XEN_HYPER_MAX_CPUS()) {
+-		return FALSE;
+-	}
+-
+-	i = pcpu_id / (sizeof(ulong) * 8);
+-	j = pcpu_id % (sizeof(ulong) * 8);
+-	cpumask += i;
+-	if (*cpumask & (1UL << j)) {
+-		return TRUE;
+-	} else {
+-		return FALSE;
+-	}
++int xen_hyper_test_pcpu_id(uint pcpu_id) {
++  ulong *cpumask = xht->cpumask;
++  uint i, j;
++
++  if (pcpu_id == XEN_HYPER_PCPU_ID_INVALID || pcpu_id > XEN_HYPER_MAX_CPUS()) {
++    return FALSE;
++  }
++
++  i = pcpu_id / (sizeof(ulong) * 8);
++  j = pcpu_id % (sizeof(ulong) * 8);
++  cpumask += i;
++  if (*cpumask & (1UL << j)) {
++    return TRUE;
++  } else {
++    return FALSE;
++  }
+ }
+
+-
+-
+ /*
+  * Calculate and return the uptime.
+  */
+-ulonglong
+-xen_hyper_get_uptime_hyper(void)
+-{
+-	ulong jiffies, tmp1, tmp2;
+-	ulonglong jiffies_64, wrapped;
+-
+-	if (symbol_exists("jiffies_64")) {
+-		get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64);
+-		wrapped = (jiffies_64 & 0xffffffff00000000ULL);
+-		if (wrapped) {
+-			wrapped -= 0x100000000ULL;
+-			jiffies_64 &= 0x00000000ffffffffULL;
+-			jiffies_64 |= wrapped;
+-			jiffies_64 += (ulonglong)(300*machdep->hz);
+-		} else {
+-			tmp1 = (ulong)(uint)(-300*machdep->hz);
+-			tmp2 = (ulong)jiffies_64;
+-			jiffies_64 = (ulonglong)(tmp2 - tmp1);
+-		}
+-	} else if (symbol_exists("jiffies")) {
+-		get_symbol_data("jiffies", sizeof(long), &jiffies);
+-		jiffies_64 = (ulonglong)jiffies;
+-	} else {
+-		jiffies_64 = 0;	/* hypervisor does not have uptime */
+-	}
++ulonglong xen_hyper_get_uptime_hyper(void) {
++  ulong jiffies, tmp1, tmp2;
++  ulonglong jiffies_64, wrapped;
++
++  if (symbol_exists("jiffies_64")) {
++    get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64);
++    wrapped = (jiffies_64 & 0xffffffff00000000ULL);
++    if (wrapped) {
++      wrapped -= 0x100000000ULL;
++      jiffies_64 &= 0x00000000ffffffffULL;
++      jiffies_64 |= wrapped;
++      jiffies_64 += (ulonglong)(300 * machdep->hz);
++    } else {
++      tmp1 = (ulong)(uint)(-300 * machdep->hz);
++      tmp2 = (ulong)jiffies_64;
++      jiffies_64 = (ulonglong)(tmp2 - tmp1);
++    }
++  } else if (symbol_exists("jiffies")) {
++    get_symbol_data("jiffies", sizeof(long), &jiffies);
++    jiffies_64 = (ulonglong)jiffies;
++  } else {
++    jiffies_64 = 0; /* hypervisor does not have uptime */
++  }
+
+-	return jiffies_64;
++  return jiffies_64;
+ }
+
+ /*
+  * Get cpu informatin around.
+  */
+-void
+-xen_hyper_get_cpu_info(void)
+-{
+-	ulong addr, init_begin, init_end;
+-	ulong *cpumask;
+-	uint *cpu_idx;
+-	int i, j, cpus;
+-
+-	XEN_HYPER_STRUCT_SIZE_INIT(cpumask_t, "cpumask_t");
+-
+-	if (symbol_exists("nr_cpu_ids"))
+-		get_symbol_data("nr_cpu_ids", sizeof(uint), &xht->max_cpus);
+-	else {
+-		init_begin = symbol_value("__init_begin");
+-		init_end = symbol_value("__init_end");
+-		addr = symbol_value("max_cpus");
+-
+-		if (addr >= init_begin && addr < init_end)
+-			xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8;
+-		else {
+-			get_symbol_data("max_cpus", sizeof(xht->max_cpus), &xht->max_cpus);
+-			if (XEN_HYPER_SIZE(cpumask_t) * 8 > xht->max_cpus)
+-				xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8;
+-		}
+-	}
+-
+-	if (xht->cpumask) {
+-		free(xht->cpumask);
+-	}
+-	if((xht->cpumask = malloc(XEN_HYPER_SIZE(cpumask_t))) == NULL) {
+-		error(FATAL, "cannot malloc cpumask space.\n");
+-	}
+-	addr = symbol_value("cpu_present_map");
+-	if (!readmem(addr, KVADDR, xht->cpumask,
+-	    XEN_HYPER_SIZE(cpumask_t), "cpu_present_map", RETURN_ON_ERROR)) {
+-		error(FATAL, "cannot read cpu_present_map.\n");
+-	}
+-	if (xht->cpu_idxs) {
+-		free(xht->cpu_idxs);
+-	}
+-	if((xht->cpu_idxs = malloc(sizeof(uint) * XEN_HYPER_MAX_CPUS())) == NULL) {
+-		error(FATAL, "cannot malloc cpu_idxs space.\n");
+-	}
+-	memset(xht->cpu_idxs, 0xff, sizeof(uint) * XEN_HYPER_MAX_CPUS());
+-
+-	for (i = cpus = 0, cpumask = xht->cpumask, cpu_idx = xht->cpu_idxs;
+-	    i < (XEN_HYPER_SIZE(cpumask_t)/sizeof(ulong)); i++, cpumask++) {
+-		for (j = 0; j < sizeof(ulong) * 8; j++) {
+-			if (*cpumask & (1UL << j)) {
+-				*cpu_idx++ = i * sizeof(ulong) * 8 + j;
+-				cpus++;
+-			}
+-		}
+-	}
+-	xht->pcpus = cpus;
++void xen_hyper_get_cpu_info(void) {
++  ulong addr, init_begin, init_end;
++  ulong *cpumask;
++  uint *cpu_idx;
++  int i, j, cpus;
++
++  XEN_HYPER_STRUCT_SIZE_INIT(cpumask_t, "cpumask_t");
++
++  if (symbol_exists("nr_cpu_ids"))
++    get_symbol_data("nr_cpu_ids", sizeof(uint), &xht->max_cpus);
++  else {
++    init_begin = symbol_value("__init_begin");
++    init_end = symbol_value("__init_end");
++    addr = symbol_value("max_cpus");
++
++    if (addr >= init_begin && addr < init_end)
++      xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8;
++    else {
++      get_symbol_data("max_cpus", sizeof(xht->max_cpus), &xht->max_cpus);
++      if (XEN_HYPER_SIZE(cpumask_t) * 8 > xht->max_cpus)
++        xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8;
++    }
++  }
++
++  if (xht->cpumask) {
++    free(xht->cpumask);
++  }
++  if ((xht->cpumask = malloc(XEN_HYPER_SIZE(cpumask_t))) == NULL) {
++    error(FATAL, "cannot malloc cpumask space.\n");
++  }
++  addr = symbol_value("cpu_present_map");
++  if (!readmem(addr, KVADDR, xht->cpumask, XEN_HYPER_SIZE(cpumask_t),
++               "cpu_present_map", RETURN_ON_ERROR)) {
++    error(FATAL, "cannot read cpu_present_map.\n");
++  }
++  if (xht->cpu_idxs) {
++    free(xht->cpu_idxs);
++  }
++  if ((xht->cpu_idxs = malloc(sizeof(uint) * XEN_HYPER_MAX_CPUS())) == NULL) {
++    error(FATAL, "cannot malloc cpu_idxs space.\n");
++  }
++  memset(xht->cpu_idxs, 0xff, sizeof(uint) * XEN_HYPER_MAX_CPUS());
++
++  for (i = cpus = 0, cpumask = xht->cpumask, cpu_idx = xht->cpu_idxs;
++       i < (XEN_HYPER_SIZE(cpumask_t) / sizeof(ulong)); i++, cpumask++) {
++    for (j = 0; j < sizeof(ulong) * 8; j++) {
++      if (*cpumask & (1UL << j)) {
++        *cpu_idx++ = i * sizeof(ulong) * 8 + j;
++        cpus++;
++      }
++    }
++  }
++  xht->pcpus = cpus;
+ }
+
+ /*
+  * Calculate the number of physical cpu for x86.
+  */
+-int
+-xen_hyper_x86_get_smp_cpus(void)
+-{
+-	if (xht->pcpus) {
+-		return xht->pcpus;
+-	}
+-	xen_hyper_get_cpu_info();
+-	return xht->pcpus;
++int xen_hyper_x86_get_smp_cpus(void) {
++  if (xht->pcpus) {
++    return xht->pcpus;
++  }
++  xen_hyper_get_cpu_info();
++  return xht->pcpus;
+ }
+
+ /*
+  * Calculate used memory size for x86.
+  */
+-uint64_t
+-xen_hyper_x86_memory_size(void)
+-{
+-	ulong vaddr;
+-
+-	if (machdep->memsize) {
+-		return machdep->memsize;
+-	}
+-	vaddr = symbol_value("total_pages");
+-	if (!readmem(vaddr, KVADDR, &xht->total_pages, sizeof(xht->total_pages),
+-		"total_pages", RETURN_ON_ERROR)) {
+-		error(WARNING, "cannot read total_pages.\n");
+-	}
+-	xht->sys_pages = xht->total_pages;
+-	machdep->memsize = (uint64_t)(xht->sys_pages) * (uint64_t)(machdep->pagesize);
+-	return machdep->memsize;
+-}
++uint64_t xen_hyper_x86_memory_size(void) {
++  ulong vaddr;
+
++  if (machdep->memsize) {
++    return machdep->memsize;
++  }
++  vaddr = symbol_value("total_pages");
++  if (!readmem(vaddr, KVADDR, &xht->total_pages, sizeof(xht->total_pages),
++               "total_pages", RETURN_ON_ERROR)) {
++    error(WARNING, "cannot read total_pages.\n");
++  }
++  xht->sys_pages = xht->total_pages;
++  machdep->memsize = (uint64_t)(xht->sys_pages) * (uint64_t)(machdep->pagesize);
++  return machdep->memsize;
++}
+
+ /*
+  * Calculate the number of physical cpu for ia64.
+  */
+-int
+-xen_hyper_ia64_get_smp_cpus(void)
+-{
+-	return xen_hyper_x86_get_smp_cpus();
+-}
++int xen_hyper_ia64_get_smp_cpus(void) { return xen_hyper_x86_get_smp_cpus(); }
+
+ /*
+  * Calculate used memory size for ia64.
+  */
+-uint64_t
+-xen_hyper_ia64_memory_size(void)
+-{
+-	return xen_hyper_x86_memory_size();
++uint64_t xen_hyper_ia64_memory_size(void) {
++  return xen_hyper_x86_memory_size();
+ }
+
+-/* 
+- * Calculate and return the speed of the processor. 
++/*
++ * Calculate and return the speed of the processor.
+  */
+-ulong
+-xen_hyper_ia64_processor_speed(void)
+-{
+-	ulong mhz, proc_freq;
++ulong xen_hyper_ia64_processor_speed(void) {
++  ulong mhz, proc_freq;
+
+-	if (machdep->mhz)
+-		return(machdep->mhz);
++  if (machdep->mhz)
++    return (machdep->mhz);
+
+-	mhz = 0;
++  mhz = 0;
+
+-	if (!xht->cpu_data_address ||
+-	    !XEN_HYPER_VALID_STRUCT(cpuinfo_ia64) ||
+-	    XEN_HYPER_INVALID_MEMBER(cpuinfo_ia64_proc_freq))
+-		return (machdep->mhz = mhz);
++  if (!xht->cpu_data_address || !XEN_HYPER_VALID_STRUCT(cpuinfo_ia64) ||
++      XEN_HYPER_INVALID_MEMBER(cpuinfo_ia64_proc_freq))
++    return (machdep->mhz = mhz);
+
+-	readmem(xen_hyper_per_cpu(xht->cpu_data_address, xht->cpu_idxs[0]) +
+-		XEN_HYPER_OFFSET(cpuinfo_ia64_proc_freq),
+-		KVADDR, &proc_freq, sizeof(ulong),
+-		"cpuinfo_ia64 proc_freq", FAULT_ON_ERROR);
++  readmem(xen_hyper_per_cpu(xht->cpu_data_address, xht->cpu_idxs[0]) +
++              XEN_HYPER_OFFSET(cpuinfo_ia64_proc_freq),
++          KVADDR, &proc_freq, sizeof(ulong), "cpuinfo_ia64 proc_freq",
++          FAULT_ON_ERROR);
+
+-	mhz = proc_freq/1000000;
++  mhz = proc_freq / 1000000;
+
+-	return (machdep->mhz = mhz);
++  return (machdep->mhz = mhz);
+ }
+
+-
+-
+ /*
+  * Print an aligned string with specified length.
+  */
+-void
+-xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag)
+-{
+-	char buf[XEN_HYPER_CMD_BUFSIZE];
+-	int sl, r;
+-	char *s1, *s2;
+-
+-	sl = strlen(str1);
+-	if (sl > len) {
+-		r = 0;
+-	} else {
+-		r = len - sl;
+-	}
+-
+-	memset(buf, ' ', sizeof(buf));
+-	buf[r] = '\0';
+-	if (flag & XEN_HYPER_PRI_L) {
+-		s1 = str1;
+-		s2 = buf;
+-	} else {
+-		s1 = buf;
+-		s2 = str1;
+-	}
+-	if (str2) {
+-		fprintf(fp, "%s%s%s", s1, s2, str2);
+-	} else {
+-		fprintf(fp, "%s%s", s1, s2);
+-	}
+-	if (flag & XEN_HYPER_PRI_LF) {
+-		fprintf(fp, "\n");
+-	}
+-}
+-
+-ulong
+-xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpuid)
+-{
+-	struct xen_hyper_pcpu_context *pcc;
+-	struct xen_hyper_vcpu_context_array *vcca;
+-	struct xen_hyper_vcpu_context *vcc;
+-	int i, j;
+-
+-	if (!xen_hyper_test_pcpu_id(pcpuid))
+-		return 0;
+-
+-	pcc = &xhpct->context_array[pcpuid];
+-	if (pcc->current_vcpu)
+-		return pcc->current_vcpu;
+-
+-	for (i = 0, vcca = xhvct->vcpu_context_arrays;
+-	    i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) {
+-		for (j = 0, vcc = vcca->context_array;
+-		    j < vcca->context_array_cnt; j++, vcc++) {
+-			if (vcc->processor == pcpuid &&
+-			    vcc->state == XEN_HYPER_RUNSTATE_running) {
+-				return vcc->vcpu;
+-			}
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-ulong
+-xen_hyper_pcpu_to_active_vcpu(ulong pcpu)
+-{
+-	ulong vcpu;
+-
+-	/* if pcpu is vcpu address, return it. */
+-	if (pcpu & (~(PAGESIZE() - 1))) {
+-		return pcpu;
+-	}
+-
+-	if(!(vcpu = XEN_HYPER_CURR_VCPU(pcpu)))
+-		error(FATAL, "invalid pcpu id\n");
+-	return vcpu;
+-}
+-
+-void
+-xen_hyper_print_bt_header(FILE *out, ulong vcpu, int newline)
+-{
+-	struct xen_hyper_vcpu_context *vcc;
+-
+-	if (newline)
+-		fprintf(out, "\n");
+-
+-	vcc = xen_hyper_vcpu_to_vcpu_context(vcpu);
+-	if (!vcc)
+-		error(FATAL, "invalid vcpu\n");
+-	fprintf(out, "PCPU: %2d VCPU: %lx\n", vcc->processor, vcpu);
++void xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag) {
++  char buf[XEN_HYPER_CMD_BUFSIZE];
++  int sl, r;
++  char *s1, *s2;
++
++  sl = strlen(str1);
++  if (sl > len) {
++    r = 0;
++  } else {
++    r = len - sl;
++  }
++
++  memset(buf, ' ', sizeof(buf));
++  buf[r] = '\0';
++  if (flag & XEN_HYPER_PRI_L) {
++    s1 = str1;
++    s2 = buf;
++  } else {
++    s1 = buf;
++    s2 = str1;
++  }
++  if (str2) {
++    fprintf(fp, "%s%s%s", s1, s2, str2);
++  } else {
++    fprintf(fp, "%s%s", s1, s2);
++  }
++  if (flag & XEN_HYPER_PRI_LF) {
++    fprintf(fp, "\n");
++  }
++}
++
++ulong xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpuid) {
++  struct xen_hyper_pcpu_context *pcc;
++  struct xen_hyper_vcpu_context_array *vcca;
++  struct xen_hyper_vcpu_context *vcc;
++  int i, j;
++
++  if (!xen_hyper_test_pcpu_id(pcpuid))
++    return 0;
++
++  pcc = &xhpct->context_array[pcpuid];
++  if (pcc->current_vcpu)
++    return pcc->current_vcpu;
++
++  for (i = 0, vcca = xhvct->vcpu_context_arrays;
++       i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) {
++    for (j = 0, vcc = vcca->context_array; j < vcca->context_array_cnt;
++         j++, vcc++) {
++      if (vcc->processor == pcpuid &&
++          vcc->state == XEN_HYPER_RUNSTATE_running) {
++        return vcc->vcpu;
++      }
++    }
++  }
++
++  return 0;
++}
++
++ulong xen_hyper_pcpu_to_active_vcpu(ulong pcpu) {
++  ulong vcpu;
++
++  /* if pcpu is vcpu address, return it. */
++  if (pcpu & (~(PAGESIZE() - 1))) {
++    return pcpu;
++  }
++
++  if (!(vcpu = XEN_HYPER_CURR_VCPU(pcpu)))
++    error(FATAL, "invalid pcpu id\n");
++  return vcpu;
++}
++
++void xen_hyper_print_bt_header(FILE *out, ulong vcpu, int newline) {
++  struct xen_hyper_vcpu_context *vcc;
++
++  if (newline)
++    fprintf(out, "\n");
++
++  vcc = xen_hyper_vcpu_to_vcpu_context(vcpu);
++  if (!vcc)
++    error(FATAL, "invalid vcpu\n");
++  fprintf(out, "PCPU: %2d VCPU: %lx\n", vcc->processor, vcpu);
+ }
+ #endif
diff -Nru crash-8.0.2/debian/patches/series crash-8.0.2/debian/patches/series
--- crash-8.0.2/debian/patches/series	2022-11-17 18:27:25.000000000 +0000
+++ crash-8.0.2/debian/patches/series	2024-01-04 06:47:25.000000000 +0000
@@ -1 +1,25 @@
 0001-dont-git-clone-eppic-extension.patch
+lp2038249-0001-arm64-handle-vabits_actual-symbol-missing-case.patch
+lp2038249-0002-x86_64-Fix-for-move-of-per-cpu-variables-into-struct.patch
+lp2038249-0003-Fix-for-mm_struct.rss_stat-conversion-into-percpu_co.patch
+lp2038249-0004-SLUB-Fix-for-offset-change-of-struct-slab-members-on.patch
+lp2038249-0005-Fix-for-kmem-i-to-display-correct-SLAB-statistics-on.patch
+lp2038249-0006-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.1-and-later.patch
+lp2038249-0007-SLAB-Fix-for-kmem-s-S-options-on-Linux-6.2-rc1-and-l.patch
+lp2038249-0008-Port-the-maple-tree-data-structures-and-functions.patch
+lp2038249-0009-Add-do_maple_tree-for-maple-tree-operations.patch
+lp2038249-0010-Introduce-maple-tree-vma-iteration-to-vm_area_dump.patch
+lp2038249-0011-Dump-maple-tree-offset-variables-by-help-o.patch
+lp2038249-0012-Fix-kmem-n-option-to-display-memory-blocks-on-Linux-.patch
+lp2038249-0013-Fix-failure-of-dev-d-D-options-on-Linux-6.4-and-late.patch
+lp2038249-0014-Fix-kmem-v-option-displaying-no-regions-on-Linux-6.3.patch
+lp2038249-0015-x86_64-Fix-bt-command-printing-stale-entries-on-Linu.patch
+lp2038249-0016-Support-module-memory-layout-change-on-Linux-6.4.patch
+lp2038249-0017-Fix-failure-of-gathering-task-table-on-Linux-6.5-rc1.patch
+lp2038249-0018-Fix-compilation-error-due-to-new-strlcpy-function-th.patch
+lp2038249-0019-Fix-irq-a-option-on-Linux-6.0-and-later.patch
+lp2038249-0020-Exclude-zero-entries-from-do_maple_tree-return-value.patch
+lp2038249-0021-Fix-irq-a-s-options-on-Linux-6.5-rc1-and-later.patch
+lp2038249-0022-arm64-Fix-vtop-command-to-display-swap-information-o.patch
+lp2038249-0023-Fix-rd-command-to-display-data-on-zram-on-Linux-5.17.patch
+lp2038249-0024-Fix-compilation-error-and-warning-with-gcc-4.8.5.patch