diff -Nru sysdig-0.8.0/debian/changelog sysdig-0.8.0/debian/changelog
--- sysdig-0.8.0/debian/changelog	2017-07-17 14:57:52.000000000 +0100
+++ sysdig-0.8.0/debian/changelog	2017-10-27 15:13:11.000000000 +0100
@@ -1,3 +1,15 @@
+sysdig (0.8.0-1ubuntu2) xenial; urgency=medium
+
+  * Fix build issues with 4.11 kernel headers (LP: #1727275)
+    backports from upstream sysdig to support 4.11 and HWE 4.13
+    kernels using the following commits:
+    - [5fef96] Fix for kernel < 3.9 with CPU_ACCOUNTING_NATIVE disabled.
+    - [29c40f] Fix cputime for kernel 3.8 and 2.6.18
+    - [4596f0] Make sure to unlock mutex during error paths
+    - [460f6e] Update probe module for kernel 4.11 (#829)
+
+ -- Colin Ian King  Fri, 27 Oct 2017 15:13:11 +0100
+
 sysdig (0.8.0-1ubuntu1) xenial; urgency=medium
 
   * Fix build issues with 4.10 HWE kernel headers (LP: #1704283)
diff -Nru sysdig-0.8.0/debian/patches/0004-Fix-for-kernel-3.9-with-CPU_ACCOUNTING_NATIVE-disabl.patch sysdig-0.8.0/debian/patches/0004-Fix-for-kernel-3.9-with-CPU_ACCOUNTING_NATIVE-disabl.patch
--- sysdig-0.8.0/debian/patches/0004-Fix-for-kernel-3.9-with-CPU_ACCOUNTING_NATIVE-disabl.patch	1970-01-01 01:00:00.000000000 +0100
+++ sysdig-0.8.0/debian/patches/0004-Fix-for-kernel-3.9-with-CPU_ACCOUNTING_NATIVE-disabl.patch	2017-10-27 14:26:43.000000000 +0100
@@ -0,0 +1,167 @@
+From 5fef96f4a8db7cc7028370a67d9ac05c94abf2a5 Mon Sep 17 00:00:00 2001
+From: Luca Marturana
+Date: Tue, 23 Feb 2016 09:50:54 -0800
+Subject: [PATCH] Fix for kernel < 3.9 with CPU_ACCOUNTING_NATIVE disabled. Use
+ exposed task_cputime_adjusted for last kernels
+
+---
+ driver/main.c        |   5 +--
+ driver/ppm_cputime.c | 104 ++++++++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 81 insertions(+), 28 deletions(-)
+
+Index: sysdig-0.8.0/driver/main.c
+===================================================================
+--- sysdig-0.8.0.orig/driver/main.c
++++ sysdig-0.8.0/driver/main.c
+@@ -632,9 +632,8 @@ static long ppm_ioctl(struct file *filp,
+ 		if (nentries < pli.max_entries) {
+ 			cputime_t utime, stime;
+ 
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+-			utime = t->utime;
+-			stime = t->stime;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
++			task_cputime_adjusted(t, &utime, &stime);
+ #else
+ 			ppm_task_cputime_adjusted(t, &utime, &stime);
+ #endif
+Index: sysdig-0.8.0/driver/ppm_cputime.c
+===================================================================
+--- sysdig-0.8.0.orig/driver/ppm_cputime.c
++++ sysdig-0.8.0/driver/ppm_cputime.c
+@@ -1,5 +1,8 @@
+ #include 
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 9, 0))
++
++// These function are taken from the linux kernel and are used only
++// on versions that don't export task_cputime_adjusted()
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ 
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37))
+ #include 
+@@ -95,13 +98,35 @@ void task_cputime(struct task_struct *t,
+ }
+ #endif
+ 
++u64 nsecs_to_jiffies64(u64 n)
++{
++#if (NSEC_PER_SEC % HZ) == 0
++	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
++	return div_u64(n, NSEC_PER_SEC / HZ);
++#elif (HZ % 512) == 0
++	/* overflow after 292 years if HZ = 1024 */
++	return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
++#else
++	/*
++	 * Generic case - optimized for cases where HZ is a multiple of 3.
++	 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
++	 */
++	return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
++#endif
++}
++
++unsigned long nsecs_to_jiffies(u64 n)
++{
++	return (unsigned long)nsecs_to_jiffies64(n);
++}
++
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ {
+ 	*ut = p->utime;
+ 	*st = p->stime;
+ }
+-#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(3, 9, 0))
+ /*
+  * Perform (stime * rtime) / total, but avoid multiplication overflow by
+  * loosing precision when the numbers are big.
+@@ -163,28 +188,6 @@ static void cputime_advance(cputime_t *c
+ 	cmpxchg_cputime(counter, old, new);
+ }
+ 
+-u64 nsecs_to_jiffies64(u64 n)
+-{
+-#if (NSEC_PER_SEC % HZ) == 0
+-	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
+-	return div_u64(n, NSEC_PER_SEC / HZ);
+-#elif (HZ % 512) == 0
+-	/* overflow after 292 years if HZ = 1024 */
+-	return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
+-#else
+-	/*
+-	 * Generic case - optimized for cases where HZ is a multiple of 3.
+-	 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
+-	 */
+-	return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
+-#endif
+-}
+-
+-unsigned long nsecs_to_jiffies(u64 n)
+-{
+-	return (unsigned long)nsecs_to_jiffies64(n);
+-}
+-
+ /*
+  * Adjust tick based cputime random precision against scheduler
+  * runtime accounting.
+@@ -255,6 +258,57 @@ void ppm_task_cputime_adjusted(struct ta
+ 	task_cputime(p, &cputime.utime, &cputime.stime);
+ 	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
+ }
++
++#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 9, 0) */
++
++#ifndef nsecs_to_cputime
++#ifdef msecs_to_cputime
++# define nsecs_to_cputime(__nsecs) \
++	msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
++#else
++#define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
++#endif
++#endif
++
++static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
++{
++	u64 temp = (__force u64) rtime;
++
++	temp *= (__force u64) utime;
++
++	if (sizeof(cputime_t) == 4)
++		temp = div_u64(temp, (__force u32) total);
++	else
++		temp = div64_u64(temp, (__force u64) total);
++
++	return (__force cputime_t) temp;
++}
++
++// Taken from task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
++void ppm_task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
++{
++	cputime_t rtime, utime = p->utime, total = utime + p->stime;
++
++	/*
++	 * Use CFS's precise accounting:
++	 */
++	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
++
++	if (total)
++		utime = scale_utime(utime, rtime, total);
++	else
++		utime = rtime;
++
++	/*
++	 * Compare with previous values, to keep monotonicity:
++	 */
++	p->prev_utime = max(p->prev_utime, utime);
++	p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
++
++	*ut = p->prev_utime;
++	*st = p->prev_stime;
++}
++
+ #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+ 
+-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) */
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
+\ No newline at end of file
diff -Nru sysdig-0.8.0/debian/patches/0005-Fix-cputime-for-kernel-3.8-and-2.6.18.patch sysdig-0.8.0/debian/patches/0005-Fix-cputime-for-kernel-3.8-and-2.6.18.patch
--- sysdig-0.8.0/debian/patches/0005-Fix-cputime-for-kernel-3.8-and-2.6.18.patch	1970-01-01 01:00:00.000000000 +0100
+++ sysdig-0.8.0/debian/patches/0005-Fix-cputime-for-kernel-3.8-and-2.6.18.patch	2017-10-27 14:31:26.000000000 +0100
@@ -0,0 +1,117 @@
+From 29c40f3f7cfc5dfbf74db9dfbbaea09f5c20d266 Mon Sep 17 00:00:00 2001
+From: Luca Marturana
+Date: Wed, 16 Mar 2016 17:08:55 +0100
+Subject: [PATCH] Fix cputime for kernel 3.8 and 2.6.18
+
+---
+ driver/ppm_cputime.c | 53 ++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 31 insertions(+), 22 deletions(-)
+
+diff --git a/driver/ppm_cputime.c b/driver/ppm_cputime.c
+index 497e47c..41df619 100644
+--- a/driver/ppm_cputime.c
++++ b/driver/ppm_cputime.c
+@@ -9,7 +9,6 @@
+ #else
+ #include 
+ #endif
+-#include 
+ #include 
+ #include 
+ #include 
+@@ -19,7 +18,6 @@
+ #include 
+ #include 
+ #include 
+-#include 
+ #include 
+ 
+ #include 
+@@ -29,6 +27,14 @@
+ #include "ppm_events.h"
+ #include "ppm.h"
+ 
++#if (defined CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) || (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30))
++void ppm_task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
++{
++	*ut = p->utime;
++	*st = p->stime;
++}
++#else
++
+ #ifndef cmpxchg_cputime
+ #define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+ #endif
+@@ -96,7 +102,16 @@ void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+ 	if (stime)
+ 		*stime += sdelta;
+ }
+-#endif
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
++static inline void task_cputime(struct task_struct *t,
++				cputime_t *utime, cputime_t *stime)
++{
++	if (utime)
++		*utime = t->utime;
++	if (stime)
++		*stime = t->stime;
++}
++#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+ 
+ u64 nsecs_to_jiffies64(u64 n)
+ {
+@@ -120,13 +135,16 @@ unsigned long nsecs_to_jiffies(u64 n)
+ 	return (unsigned long)nsecs_to_jiffies64(n);
+ }
+ 
+-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+-{
+-	*ut = p->utime;
+-	*st = p->stime;
+-}
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(3, 9, 0))
++#ifndef nsecs_to_cputime
++#ifdef msecs_to_cputime
++#define nsecs_to_cputime(__nsecs) \
++	msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
++#else
++#define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
++#endif
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ /*
+  * Perform (stime * rtime) / total, but avoid multiplication overflow by
+  * loosing precision when the numbers are big.
+@@ -259,16 +277,7 @@ void ppm_task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *
+ 	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
+ }
+ 
+-#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 9, 0) */
+-
+-#ifndef nsecs_to_cputime
+-#ifdef msecs_to_cputime
+-# define nsecs_to_cputime(__nsecs) \
+-	msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
+-#else
+-#define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
+-#endif
+-#endif
++#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) */
+ 
+ static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+ {
+@@ -309,6 +318,6 @@ void ppm_task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *
+ 	*st = p->prev_stime;
+ }
+ 
+-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+-
++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
++#endif /* (defined CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) || (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)) */
+ #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
+\ No newline at end of file
+-- 
+2.7.4
+
diff -Nru sysdig-0.8.0/debian/patches/0006-Make-sure-to-unlock-mutex-during-error-paths.patch sysdig-0.8.0/debian/patches/0006-Make-sure-to-unlock-mutex-during-error-paths.patch
--- sysdig-0.8.0/debian/patches/0006-Make-sure-to-unlock-mutex-during-error-paths.patch	1970-01-01 01:00:00.000000000 +0100
+++ sysdig-0.8.0/debian/patches/0006-Make-sure-to-unlock-mutex-during-error-paths.patch	2017-10-27 14:47:41.000000000 +0100
@@ -0,0 +1,83 @@
+From 4596f08f873f0949fcc60b93bc13b9a3a14ef123 Mon Sep 17 00:00:00 2001
+From: Brett
+Date: Wed, 22 Feb 2017 11:02:54 -0800
+Subject: [PATCH] Make sure to unlock mutex during error paths
+
+---
+ driver/main.c | 21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+Index: sysdig-0.8.0/driver/main.c
+===================================================================
+--- sysdig-0.8.0.orig/driver/main.c
++++ sysdig-0.8.0/driver/main.c
+@@ -703,7 +703,8 @@ cleanup_ioctl_procinfo:
+ 
+ 		if (!ring) {
+ 			ASSERT(false);
+-			return -ENODEV;
++			ret = -ENODEV;
++			goto cleanup_ioctl;
+ 		}
+ 
+ 		ring->capture_enabled = false;
+@@ -724,7 +725,8 @@ cleanup_ioctl_procinfo:
+ 
+ 		if (!ring) {
+ 			ASSERT(false);
+-			return -ENODEV;
++			ret = -ENODEV;
++			goto cleanup_ioctl;
+ 		}
+ 
+ 		ring->capture_enabled = true;
+@@ -777,7 +779,8 @@ cleanup_ioctl_procinfo:
+ 			new_sampling_ratio != 64 &&
+ 			new_sampling_ratio != 128) {
+ 			pr_err("invalid sampling ratio %u\n", new_sampling_ratio);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto cleanup_ioctl;
+ 		}
+ 
+ 		consumer->sampling_interval = 1000000000 / new_sampling_ratio;
+@@ -797,7 +800,8 @@ cleanup_ioctl_procinfo:
+ 
+ 		if (new_snaplen > RW_MAX_SNAPLEN) {
+ 			pr_err("invalid snaplen %u\n", new_snaplen);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto cleanup_ioctl;
+ 		}
+ 
+ 		consumer->snaplen = new_snaplen;
+@@ -828,7 +832,8 @@ cleanup_ioctl_procinfo:
+ 
+ 		if (syscall_to_set > PPM_EVENT_MAX) {
+ 			pr_err("invalid syscall %u\n", syscall_to_set);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto cleanup_ioctl;
+ 		}
+ 
+ 		set_bit(syscall_to_set, g_events_mask);
+@@ -844,7 +849,8 @@ cleanup_ioctl_procinfo:
+ 
+ 		if (syscall_to_unset > NR_syscalls) {
+ 			pr_err("invalid syscall %u\n", syscall_to_unset);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto cleanup_ioctl;
+ 		}
+ 
+ 		clear_bit(syscall_to_unset, g_events_mask);
+@@ -1002,7 +1008,8 @@ static int ppm_mmap(struct file *filp, s
+ 	ring = per_cpu_ptr(consumer->ring_buffers, ring_no);
+ 	if (!ring) {
+ 		ASSERT(false);
+-		return -ENODEV;
++		ret = -ENODEV;
++		goto cleanup_mmap;
+ 	}
+ 
+ 	if (length <= PAGE_SIZE) {
diff -Nru sysdig-0.8.0/debian/patches/0007-Update-probe-module-for-kernel-4.11-829.patch sysdig-0.8.0/debian/patches/0007-Update-probe-module-for-kernel-4.11-829.patch
--- sysdig-0.8.0/debian/patches/0007-Update-probe-module-for-kernel-4.11-829.patch	1970-01-01 01:00:00.000000000 +0100
+++ sysdig-0.8.0/debian/patches/0007-Update-probe-module-for-kernel-4.11-829.patch	2017-10-27 14:54:55.000000000 +0100
@@ -0,0 +1,105 @@
+Author: Brett Bertocci
+Date: Fri May 5 03:20:25 2017 -0700
+
+    Update probe module for kernel 4.11 (#829)
+
+    * 4.11 kernel changes: remove cputime_t and helpers
+
+    * Use nsec_to_clock_t() where needed
+
+    While nsec_to_clock_t is declared in an available header file,
+    we need to define it because the kernel doesn't export the symbol.
+
+    * Review fixes - better param.h include and indentation
+
+(backport of upstream sysdig commit 460f6e28feaaaaccdcc3e33332d2af65a2912ebb)
+Signed-off-by: Colin Ian King
+
+Index: sysdig-0.8.0/driver/main.c
+===================================================================
+--- sysdig-0.8.0.orig/driver/main.c
++++ sysdig-0.8.0/driver/main.c
+@@ -38,7 +38,12 @@ along with sysdig. If not, see
+ #include 
+ #include 
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ #include 
++#else
++#include 
++#include 
++#endif
+ #include 
+ #include 
+ #include 
+@@ -120,7 +125,9 @@ static int init_ring_buffer(struct ppm_r
+ static void free_ring_buffer(struct ppm_ring_buffer_context *ring);
+ static void reset_ring_buffer(struct ppm_ring_buffer_context *ring);
+ static ssize_t ppe_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ void ppm_task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
++#endif
+ 
+ #ifndef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+ #error The kernel must have HAVE_SYSCALL_TRACEPOINTS in order for sysdig to be useful
+@@ -630,7 +637,11 @@ static long ppm_ioctl(struct file *filp,
+ 		task_lock(p);
+ #endif
+ 		if (nentries < pli.max_entries) {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ 			cputime_t utime, stime;
++#else
++			u64 utime, stime;
++#endif
+ 
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ 			task_cputime_adjusted(t, &utime, &stime);
+@@ -638,8 +649,13 @@ static long ppm_ioctl(struct file *filp,
+ 			ppm_task_cputime_adjusted(t, &utime, &stime);
+ #endif
+ 			proclist_info->entries[nentries].pid = t->pid;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ 			proclist_info->entries[nentries].utime = cputime_to_clock_t(utime);
+ 			proclist_info->entries[nentries].stime = cputime_to_clock_t(stime);
++#else
++			proclist_info->entries[nentries].utime = nsec_to_clock_t(utime);
++			proclist_info->entries[nentries].stime = nsec_to_clock_t(stime);
++#endif
+ 		}
+ 
+ 		nentries++;
+Index: sysdig-0.8.0/driver/ppm_cputime.c
+===================================================================
+--- sysdig-0.8.0.orig/driver/ppm_cputime.c
++++ sysdig-0.8.0/driver/ppm_cputime.c
+@@ -320,4 +320,28 @@ void ppm_task_cputime_adjusted(struct ta
+ 
+ #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
+ #endif /* (defined CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) || (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)) */
+-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
+\ No newline at end of file
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
++#include 
++#include 
++
++/*
++ * Implementation copied from kernel/time/time.c in 4.11.0
++ */
++u64 nsec_to_clock_t(u64 x)
++{
++#if (NSEC_PER_SEC % USER_HZ) == 0
++	return div_u64(x, NSEC_PER_SEC / USER_HZ);
++#elif (USER_HZ % 512) == 0
++	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
++#else
++	/*
++	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
++	 * overflow after 64.99 years
++	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
++	 */
++	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
++#endif
++}
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */
diff -Nru sysdig-0.8.0/debian/patches/series sysdig-0.8.0/debian/patches/series
--- sysdig-0.8.0/debian/patches/series	2017-07-17 14:49:13.000000000 +0100
+++ sysdig-0.8.0/debian/patches/series	2017-10-27 14:49:35.000000000 +0100
@@ -1,3 +1,7 @@
 0001-Fix-compilation-issues-with-kernel-4.9-684.patch
 0002-Ifdef-__access_remote_vm-since-the-function-was-made.patch
 0003-Support-for-updated-cpu-hotplug-API-in-4.10-kernel-7.patch
+0004-Fix-for-kernel-3.9-with-CPU_ACCOUNTING_NATIVE-disabl.patch
+0005-Fix-cputime-for-kernel-3.8-and-2.6.18.patch
+0006-Make-sure-to-unlock-mutex-during-error-paths.patch
+0007-Update-probe-module-for-kernel-4.11-829.patch
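
Note on patch 0007: nsec_to_clock_t() converts a nanosecond count into USER_HZ clock ticks, which is the unit the utime/stime fields handed back by the PPM_IOCTL_GET_PROCLIST path have always used. The stand-alone program below is an illustrative sketch only and is not part of the debdiff: it assumes USER_HZ is 100 (the usual x86 value) and replaces the kernel's div_u64() helper with ordinary 64-bit division, which userspace can do directly, so the rounding of the generic (#else) branch can be checked from the command line.

/* Illustrative sketch only, not part of the debdiff above.
 * Assumptions: USER_HZ == 100 and plain 64-bit division instead of
 * the kernel's div_u64().
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define USER_HZ 100ULL

/* Same arithmetic as the generic (#else) branch of nsec_to_clock_t()
 * introduced by 0007-Update-probe-module-for-kernel-4.11-829.patch. */
static uint64_t nsec_to_clock_t_demo(uint64_t x)
{
	return (x * 9) / ((9ULL * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
}

int main(void)
{
	const uint64_t samples[] = {
		0,			/* 0 ns   -> 0 ticks      */
		5 * 1000 * 1000ULL,	/* 5 ms   -> 0 ticks      */
		1500 * 1000 * 1000ULL,	/* 1.5 s  -> 150 ticks    */
		3600 * NSEC_PER_SEC	/* 1 hour -> 360000 ticks */
	};
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%llu ns -> %llu ticks\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)nsec_to_clock_t_demo(samples[i]));
	return 0;
}

With USER_HZ = 100 the precomputed divisor works out to 90000000, so the samples print 0, 0, 150 and 360000 ticks, matching an exact nanoseconds-to-ticks conversion for these values.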