Commit e71bb4e

suryasaimadhu authored and KAGA-KOKO committed
x86/microcode/AMD: Unify load_ucode_amd_ap()
Use one version for both bitnesses by adding a helper which does the actual container finding and parsing and which can be used on any CPU - BSP or AP. This streamlines the paths more.

Signed-off-by: Borislav Petkov <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
1 parent f3ad136 · commit e71bb4e

File tree

  • arch/x86/kernel/cpu/microcode/amd.c

1 file changed: 31 additions, 50 deletions

arch/x86/kernel/cpu/microcode/amd.c

Lines changed: 31 additions & 50 deletions
@@ -261,7 +261,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
 	struct ucode_cpu_info *uci;
 	struct cpio_data cp;
@@ -281,89 +281,71 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
 		cp = find_microcode_in_initrd(path, use_pa);
 
-	if (!(cp.data && cp.size))
-		return;
-
 	/* Needed in load_microcode_amd() */
 	uci->cpu_sig.sig = cpuid_1_eax;
 
-	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
+	*ret = cp;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
- * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
- * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
-	struct microcode_amd *mc;
-	struct cpio_data cp;
+	struct cpio_data cp = { };
 
-	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-		__apply_microcode_amd(mc);
-		return;
-	}
-
-	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
-		cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
+	__load_ucode_amd(cpuid_1_eax, &cp);
 
 	if (!(cp.data && cp.size))
 		return;
 
-	/*
-	 * This would set amd_ucode_patch above so that the following APs can
-	 * use it directly instead of going down this path again.
-	 */
 	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
 }
-#else
+
 void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
 	struct equiv_cpu_entry *eq;
 	struct microcode_amd *mc;
+	struct cont_desc *desc;
 	u16 eq_id;
 
+	if (IS_ENABLED(CONFIG_X86_32)) {
+		mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+		desc = (struct cont_desc *)__pa_nodebug(&cont);
+	} else {
+		mc = (struct microcode_amd *)amd_ucode_patch;
+		desc = &cont;
+	}
+
 	/* First AP hasn't cached it yet, go through the blob. */
-	if (!cont.data) {
-		struct cpio_data cp;
+	if (!desc->data) {
+		struct cpio_data cp = { };
 
-		if (cont.size == -1)
+		if (desc->size == -1)
			return;
 
 reget:
-		if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
-			cp = find_microcode_in_initrd(ucode_path, false);
-
-			if (!(cp.data && cp.size)) {
-				/*
-				 * Mark it so that other APs do not scan again
-				 * for no real reason and slow down boot
-				 * needlessly.
-				 */
-				cont.size = -1;
-				return;
-			}
+		__load_ucode_amd(cpuid_1_eax, &cp);
+		if (!(cp.data && cp.size)) {
+			/*
+			 * Mark it so that other APs do not scan again for no
+			 * real reason and slow down boot needlessly.
+			 */
+			desc->size = -1;
+			return;
 		}
 
-		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
-			cont.data = NULL;
-			cont.size = -1;
+		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, desc)) {
+			desc->data = NULL;
+			desc->size = -1;
 			return;
 		}
 	}
 
-	eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
+	eq = (struct equiv_cpu_entry *)(desc->data + CONTAINER_HDR_SZ);
 
 	eq_id = find_equiv_id(eq, cpuid_1_eax);
 	if (!eq_id)
 		return;
 
-	if (eq_id == cont.eq_id) {
+	if (eq_id == desc->eq_id) {
 		u32 rev, dummy;
 
 		native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@@ -384,7 +366,6 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 		goto reget;
 	}
 }
-#endif /* CONFIG_X86_32 */
 
 static enum ucode_state
 load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
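For readers skimming the hunks above, the shape of the refactoring can be restated outside the kernel: one helper locates the microcode container and hands it back, and the BSP and AP entry points differ only in what they do with the result. The toy program below is a userspace sketch of that pattern only; its names (struct blob, find_ucode, load_ucode_bsp, load_ucode_ap) and data are placeholders, not the kernel's APIs.

#include <stddef.h>
#include <stdio.h>

struct blob {
	const unsigned char *data;
	size_t size;
};

/* Stand-in for the builtin/initrd microcode container. */
static const unsigned char fake_container[16];

/* Analogue of __load_ucode_amd(): find the container, hand it to the caller. */
static void find_ucode(unsigned int cpuid_1_eax, struct blob *ret)
{
	(void)cpuid_1_eax;		/* family-based selection elided */
	ret->data = fake_container;
	ret->size = sizeof(fake_container);
}

static void load_ucode_bsp(unsigned int cpuid_1_eax)
{
	struct blob cp = { 0 };

	find_ucode(cpuid_1_eax, &cp);	/* shared lookup */
	if (!(cp.data && cp.size))
		return;

	printf("BSP: applying %zu-byte patch\n", cp.size);
}

static void load_ucode_ap(unsigned int cpuid_1_eax)
{
	static struct blob cached;	/* stand-in for the container descriptor */

	if (!cached.data) {		/* first AP goes through the blob */
		find_ucode(cpuid_1_eax, &cached);	/* same helper as the BSP */
		if (!(cached.data && cached.size))
			return;
	}

	printf("AP: applying %zu-byte cached patch\n", cached.size);
}

int main(void)
{
	load_ucode_bsp(0x00800f12);	/* placeholder CPUID(1).EAX value */
	load_ucode_ap(0x00800f12);
	load_ucode_ap(0x00800f12);	/* later APs hit the cache */
	return 0;
}

Caching the looked-up blob in the AP path mirrors how the patched kernel code marks the descriptor so later APs skip the rescan.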
