kexec_file: Change kexec_add_buffer to take kexec_buf as argument.

This is done to simplify the kexec_add_buffer argument list.
Adapt all callers to set up a kexec_buf to pass to kexec_add_buffer.

In addition, change the type of kexec_buf.buffer from char * to void *.
There is no particular reason for it to be a char *, and the change
allows us to get rid of 3 existing casts to char * in the code.

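For reference, a minimal before/after sketch of the new calling convention,
based on the backup-segment call site in the x86 diff below (error handling
abbreviated; this mirrors the hunks, it is not additional code in the patch):

	/* Before: one call with a long positional argument list. */
	ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
			       sizeof(crash_zero_bytes), image->arch.backup_src_sz,
			       PAGE_SIZE, 0, -1, 0,
			       &image->arch.backup_load_addr);

	/* After: the caller fills a struct kexec_buf and passes it by pointer. */
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	kbuf.buffer = &crash_zero_bytes;	/* .buffer is now void *, no cast needed */
	kbuf.bufsz = sizeof(crash_zero_bytes);
	kbuf.memsz = image->arch.backup_src_sz;
	kbuf.buf_align = PAGE_SIZE;
	ret = kexec_add_buffer(&kbuf);
	if (!ret)
		image->arch.backup_load_addr = kbuf.mem;	/* load address is returned in kbuf.mem */
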
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
Acked-by: Dave Young <dyoung@redhat.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Authored by Thiago Jung Bauermann on 2016-11-29 23:45:48 +11:00; committed by Michael Ellerman
commit ec2b9bfaac
parent 60fe3910bb
4 changed files with 87 additions and 94 deletions

@@ -631,9 +631,9 @@ static int determine_backup_region(u64 start, u64 end, void *arg)
 
 int crash_load_segments(struct kimage *image)
 {
-	unsigned long src_start, src_sz, elf_sz;
-	void *elf_addr;
 	int ret;
+	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
+				  .buf_max = ULONG_MAX, .top_down = false };
 
 	/*
 	 * Determine and load a segment for backup area. First 640K RAM
@@ -647,43 +647,44 @@ int crash_load_segments(struct kimage *image)
 	if (ret < 0)
 		return ret;
 
-	src_start = image->arch.backup_src_start;
-	src_sz = image->arch.backup_src_sz;
-
 	/* Add backup segment. */
-	if (src_sz) {
+	if (image->arch.backup_src_sz) {
+		kbuf.buffer = &crash_zero_bytes;
+		kbuf.bufsz = sizeof(crash_zero_bytes);
+		kbuf.memsz = image->arch.backup_src_sz;
+		kbuf.buf_align = PAGE_SIZE;
 		/*
 		 * Ideally there is no source for backup segment. This is
 		 * copied in purgatory after crash. Just add a zero filled
 		 * segment for now to make sure checksum logic works fine.
 		 */
-		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
-				       sizeof(crash_zero_bytes), src_sz,
-				       PAGE_SIZE, 0, -1, 0,
-				       &image->arch.backup_load_addr);
+		ret = kexec_add_buffer(&kbuf);
 		if (ret)
 			return ret;
+		image->arch.backup_load_addr = kbuf.mem;
 		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
-			 image->arch.backup_load_addr, src_start, src_sz);
+			 image->arch.backup_load_addr,
+			 image->arch.backup_src_start, kbuf.memsz);
 	}
 
 	/* Prepare elf headers and add a segment */
-	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
+	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
 	if (ret)
 		return ret;
 
-	image->arch.elf_headers = elf_addr;
-	image->arch.elf_headers_sz = elf_sz;
+	image->arch.elf_headers = kbuf.buffer;
+	image->arch.elf_headers_sz = kbuf.bufsz;
 
-	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
-			       ELF_CORE_HEADER_ALIGN, 0, -1, 0,
-			       &image->arch.elf_load_addr);
+	kbuf.memsz = kbuf.bufsz;
+	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+	ret = kexec_add_buffer(&kbuf);
 	if (ret) {
 		vfree((void *)image->arch.elf_headers);
 		return ret;
 	}
+	image->arch.elf_load_addr = kbuf.mem;
 	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
-		 image->arch.elf_load_addr, elf_sz, elf_sz);
+		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);
 
 	return ret;
 }