summary refs log tree commit diff
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r-- include/linux/randomize_kstack.h 52
-rw-r--r-- include/linux/sched.h 4
2 files changed, 17 insertions, 39 deletions
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 5d3916ca747c..024fc20e7762 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/percpu-defs.h>
+#include <linux/prandom.h>
DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
randomize_kstack_offset);
@@ -45,9 +46,22 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111111100)
#endif
+DECLARE_PER_CPU(struct rnd_state, kstack_rnd_state);
+
+static __always_inline u32 get_kstack_offset(void)
+{
+ struct rnd_state *state;
+ u32 rnd;
+
+ state = &get_cpu_var(kstack_rnd_state);
+ rnd = prandom_u32_state(state);
+ put_cpu_var(kstack_rnd_state);
+
+ return rnd;
+}
+
/**
- * add_random_kstack_offset - Increase stack utilization by previously
- * chosen random offset
+ * add_random_kstack_offset - Increase stack utilization by a random offset.
*
* This should be used in the syscall entry path after user registers have been
* stored to the stack. Preemption may be enabled. For testing the resulting
@@ -56,47 +70,15 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
#define add_random_kstack_offset() do { \
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
- u32 offset = current->kstack_offset; \
+ u32 offset = get_kstack_offset(); \
u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
/* Keep allocation even after "ptr" loses scope. */ \
asm volatile("" :: "r"(ptr) : "memory"); \
} \
} while (0)
-/**
- * choose_random_kstack_offset - Choose the random offset for the next
- * add_random_kstack_offset()
- *
- * This should only be used during syscall exit. Preemption may be enabled. This
- * position in the syscall flow is done to frustrate attacks from userspace
- * attempting to learn the next offset:
- * - Maximize the timing uncertainty visible from userspace: if the
- * offset is chosen at syscall entry, userspace has much more control
- * over the timing between choosing offsets. "How long will we be in
- * kernel mode?" tends to be more difficult to predict than "how long
- * will we be in user mode?"
- * - Reduce the lifetime of the new offset sitting in memory during
- * kernel mode execution. Exposure of "thread-local" memory content
- * (e.g. current, percpu, etc) tends to be easier than arbitrary
- * location memory exposure.
- */
-#define choose_random_kstack_offset(rand) do { \
- if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
- &randomize_kstack_offset)) { \
- u32 offset = current->kstack_offset; \
- offset = ror32(offset, 5) ^ (rand); \
- current->kstack_offset = offset; \
- } \
-} while (0)
-
-static inline void random_kstack_task_init(struct task_struct *tsk)
-{
- tsk->kstack_offset = 0;
-}
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#define add_random_kstack_offset() do { } while (0)
-#define choose_random_kstack_offset(rand) do { } while (0)
-#define random_kstack_task_init(tsk) do { } while (0)
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8358e430dd7f..a7b4a980eb2f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1592,10 +1592,6 @@ struct task_struct {
unsigned long prev_lowest_stack;
#endif
-#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
- u32 kstack_offset;
-#endif
-
#ifdef CONFIG_X86_MCE
void __user *mce_vaddr;
__u64 mce_kflags;