blob: 99fe85a213c669d8ddea3225a63756d12550d25f (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <asm/cpufeature.h>
extern struct xor_block_template xor_block_pII_mmx;
extern struct xor_block_template xor_block_p5_mmx;
extern struct xor_block_template xor_block_sse;
extern struct xor_block_template xor_block_sse_pf64;
extern struct xor_block_template xor_block_avx;
/*
* When SSE is available, use it as it can write around L2. We may also be able
* to load into the L1 only depending on how the cpu deals with a load to a line
* that is being prefetched.
*
 * When AVX is usable (AVX plus OSXSAVE, checked below), force using it as it
 * is better by all measures.
*
* 32-bit without MMX can fall back to the generic routines.
*/
/*
 * Select which xor block templates are candidates for this CPU.
 * Called once at init; __always_inline so the feature checks fold into
 * the single call site.
 *
 * NOTE(review): registration order appears significant — presumably the
 * templates are benchmarked/tried in the order registered, so each branch
 * lists its preferred template first. Confirm against xor_register().
 */
static __always_inline void __init arch_xor_init(void)
{
	/*
	 * AVX requires both the CPU feature and OSXSAVE (OS has enabled
	 * extended state saving), otherwise AVX instructions would fault.
	 */
	if (boot_cpu_has(X86_FEATURE_AVX) &&
	    boot_cpu_has(X86_FEATURE_OSXSAVE)) {
		/*
		 * xor_force(): presumably skips benchmarking and uses AVX
		 * unconditionally ("better by all measures" per the comment
		 * above) — confirm against xor_force() definition.
		 */
		xor_force(&xor_block_avx);
	} else if (IS_ENABLED(CONFIG_X86_64) || boot_cpu_has(X86_FEATURE_XMM)) {
		/* SSE is architectural on x86-64, so no runtime check there. */
		xor_register(&xor_block_sse);
		xor_register(&xor_block_sse_pf64);
	} else if (boot_cpu_has(X86_FEATURE_MMX)) {
		/* 32-bit with MMX only: PII- and P5-tuned MMX variants. */
		xor_register(&xor_block_pII_mmx);
		xor_register(&xor_block_p5_mmx);
	} else {
		/* 32-bit without MMX: fall back to the generic C routines. */
		xor_register(&xor_block_8regs);
		xor_register(&xor_block_8regs_p);
		xor_register(&xor_block_32regs);
		xor_register(&xor_block_32regs_p);
	}
}
|