From: Matt Helsley <mhelsley@vmware.com>
To: LKML <linux-kernel@vger.kernel.org>
CC: Ingo Molnar, Josh Poimboeuf, Peter Zijlstra, Steven Rostedt, Matt Helsley
Subject: [PATCH v2 08/13] recordmcount: Clarify what cleanup() does
Date: Tue, 11 Jun 2019 15:21:50 -0700
X-Mailer: git-send-email 2.20.1

cleanup() mostly frees/unmaps the malloc'd/privately-mapped copy of the
ELF file recordmcount is working on, which is set up in mmap_file(). It
also resets the state used for positioning within the pseudo private
mapping of the file and for appending to the ELF file.

Split cleanup() into two steps: mmap_cleanup() for the mapping itself,
and file_append_cleanup() for the allocations storing the appended ELF
data.

Also, move the global variable initializations out of the main
per-object-file loop and nearer to the alloc/init (mmap_file()) and the
two cleanup functions so we can more clearly see how they're related.

Signed-off-by: Matt Helsley <mhelsley@vmware.com>
---
 scripts/recordmcount.c | 151 ++++++++++++++++++++++-------------------
 1 file changed, 81 insertions(+), 70 deletions(-)

diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 111419c282d3..9f4af109277e 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -48,21 +48,26 @@ static void *file_map;	/* pointer of the mapped file */
 static void *file_end;	/* pointer to the end of the mapped file */
 static int file_updated; /* flag to state file was changed */
 static void *file_ptr;	/* current file pointer location */
+
 static void *file_append; /* added to the end of the file */
 static size_t file_append_size; /* how much is added to end of file */
 
 /* Per-file resource cleanup when multiple files. */
-static void cleanup(void)
+static void file_append_cleanup(void)
+{
+	free(file_append);
+	file_append = NULL;
+	file_append_size = 0;
+	file_updated = 0;
+}
+
+static void mmap_cleanup(void)
 {
 	if (!mmap_failed)
 		munmap(file_map, sb.st_size);
 	else
 		free(file_map);
 	file_map = NULL;
-	free(file_append);
-	file_append = NULL;
-	file_append_size = 0;
-	file_updated = 0;
 }
 
 /* ulseek, uwrite, ...: Check return value for errors. */
@@ -103,7 +108,8 @@ static ssize_t uwrite(void const *const buf, size_t const count)
 		}
 		if (!file_append) {
 			perror("write");
-			cleanup();
+			file_append_cleanup();
+			mmap_cleanup();
 			return -1;
 		}
 		if (file_ptr < file_end) {
@@ -129,12 +135,76 @@ static void * umalloc(size_t size)
 	void *const addr = malloc(size);
 	if (addr == 0) {
 		fprintf(stderr, "malloc failed: %zu bytes\n", size);
-		cleanup();
+		file_append_cleanup();
+		mmap_cleanup();
 		return NULL;
 	}
 	return addr;
 }
 
+/*
+ * Get the whole file as a programming convenience in order to avoid
+ * malloc+lseek+read+free of many pieces. If successful, then mmap
+ * avoids copying unused pieces; else just read the whole file.
+ * Open for both read and write; new info will be appended to the file.
+ * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
+ * do not propagate to the file until an explicit overwrite at the last.
+ * This preserves most aspects of consistency (all except .st_size)
+ * for simultaneous readers of the file while we are appending to it.
+ * However, multiple writers still are bad. We choose not to use
+ * locking because it is expensive and the use case of kernel build
+ * makes multiple writers unlikely.
+ */
+static void *mmap_file(char const *fname)
+{
+	/* Avoid problems if early cleanup() */
+	fd_map = -1;
+	mmap_failed = 1;
+	file_map = NULL;
+	file_ptr = NULL;
+	file_updated = 0;
+	sb.st_size = 0;
+
+	fd_map = open(fname, O_RDONLY);
+	if (fd_map < 0) {
+		perror(fname);
+		return NULL;
+	}
+	if (fstat(fd_map, &sb) < 0) {
+		perror(fname);
+		goto out;
+	}
+	if (!S_ISREG(sb.st_mode)) {
+		fprintf(stderr, "not a regular file: %s\n", fname);
+		goto out;
+	}
+	file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
+			fd_map, 0);
+	if (file_map == MAP_FAILED) {
+		mmap_failed = 1;
+		file_map = umalloc(sb.st_size);
+		if (!file_map) {
+			perror(fname);
+			goto out;
+		}
+		if (read(fd_map, file_map, sb.st_size) != sb.st_size) {
+			perror(fname);
+			free(file_map);
+			file_map = NULL;
+			goto out;
+		}
+	} else
+		mmap_failed = 0;
+out:
+	close(fd_map);
+	fd_map = -1;
+
+	file_end = file_map + sb.st_size;
+
+	return file_map;
+}
+
+
 static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
 static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
 static unsigned char *ideal_nop;
@@ -238,61 +308,6 @@ static int make_nop_arm64(void *map, size_t const offset)
 	return 0;
 }
 
-/*
- * Get the whole file as a programming convenience in order to avoid
- * malloc+lseek+read+free of many pieces. If successful, then mmap
- * avoids copying unused pieces; else just read the whole file.
- * Open for both read and write; new info will be appended to the file.
- * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
- * do not propagate to the file until an explicit overwrite at the last.
- * This preserves most aspects of consistency (all except .st_size)
- * for simultaneous readers of the file while we are appending to it.
- * However, multiple writers still are bad. We choose not to use
- * locking because it is expensive and the use case of kernel build
- * makes multiple writers unlikely.
- */
-static void *mmap_file(char const *fname)
-{
-	file_map = NULL;
-	sb.st_size = 0;
-	fd_map = open(fname, O_RDONLY);
-	if (fd_map < 0) {
-		perror(fname);
-		return NULL;
-	}
-	if (fstat(fd_map, &sb) < 0) {
-		perror(fname);
-		goto out;
-	}
-	if (!S_ISREG(sb.st_mode)) {
-		fprintf(stderr, "not a regular file: %s\n", fname);
-		goto out;
-	}
-	file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
-			fd_map, 0);
-	mmap_failed = 0;
-	if (file_map == MAP_FAILED) {
-		mmap_failed = 1;
-		file_map = umalloc(sb.st_size);
-		if (!file_map) {
-			perror(fname);
-			goto out;
-		}
-		if (read(fd_map, file_map, sb.st_size) != sb.st_size) {
-			perror(fname);
-			free(file_map);
-			file_map = NULL;
-			goto out;
-		}
-	}
-out:
-	close(fd_map);
-
-	file_end = file_map + sb.st_size;
-
-	return file_map;
-}
-
 static int write_file(const char *fname)
 {
 	char tmp_file[strlen(fname) + 4];
@@ -436,10 +451,11 @@ static void MIPS64_r_info(Elf64_Rel *const rp, unsigned sym, unsigned type)
 
 static int do_file(char const *const fname)
 {
-	Elf32_Ehdr *const ehdr = mmap_file(fname);
+	Elf32_Ehdr *ehdr;
 	unsigned int reltype = 0;
 	int rc = -1;
 
+	ehdr = mmap_file(fname);
 	if (!ehdr)
 		goto out;
 
@@ -575,7 +591,8 @@ static int do_file(char const *const fname)
 
 	rc = write_file(fname);
 out:
-	cleanup();
+	file_append_cleanup();
+	mmap_cleanup();
 	return rc;
 }
 
@@ -618,12 +635,6 @@ int main(int argc, char *argv[])
 		    strcmp(file + (len - ftrace_size), ftrace) == 0)
 			continue;
 
-		/* Avoid problems if early cleanup() */
-		fd_map = -1;
-		mmap_failed = 1;
-		file_map = NULL;
-		file_ptr = NULL;
-		file_updated = 0;
 		if (do_file(file)) {
 			fprintf(stderr, "%s: failed\n", file);
 			++n_error;
-- 
2.20.1
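
For readers skimming the series, a minimal standalone sketch (not part of the
patch) of the setup/teardown pairing do_file() ends up with: mmap_file()
creates the private mapping (falling back to malloc+read), and every per-file
pass finishes with file_append_cleanup() followed by mmap_cleanup(). Names
mirror recordmcount.c, but the ELF processing is omitted and the main()
harness below is purely illustrative.

/*
 * Illustrative sketch only -- not the real tool.  It mirrors the pairing
 * of mmap_file() with file_append_cleanup() + mmap_cleanup() introduced
 * by this patch; the per-file ELF work is elided.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static struct stat sb;		/* remembers .st_size of the current file */
static int mmap_failed;		/* nonzero if we fell back to malloc+read */
static void *file_map;		/* private mapping (or malloc'd copy) */
static void *file_append;	/* data appended past the end of the file */
static size_t file_append_size;

/* Release only the buffer holding appended data. */
static void file_append_cleanup(void)
{
	free(file_append);
	file_append = NULL;
	file_append_size = 0;
}

/* Release only the mapping (or its malloc'd fallback). */
static void mmap_cleanup(void)
{
	if (!mmap_failed)
		munmap(file_map, sb.st_size);
	else
		free(file_map);
	file_map = NULL;
}

/* Map the file privately; fall back to malloc+read if mmap fails. */
static void *mmap_file(const char *fname)
{
	int fd;

	mmap_failed = 1;
	file_map = NULL;
	sb.st_size = 0;

	fd = open(fname, O_RDONLY);
	if (fd < 0 || fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	file_map = mmap(NULL, sb.st_size, PROT_READ|PROT_WRITE,
			MAP_PRIVATE, fd, 0);
	if (file_map == MAP_FAILED) {
		file_map = malloc(sb.st_size);
		if (file_map && read(fd, file_map, sb.st_size) != sb.st_size) {
			free(file_map);
			file_map = NULL;
		}
	} else {
		mmap_failed = 0;
	}
out:
	if (fd >= 0)
		close(fd);
	return file_map;
}

int main(int argc, char *argv[])
{
	int i;

	for (i = 1; i < argc; i++) {
		if (!mmap_file(argv[i])) {
			fprintf(stderr, "%s: failed\n", argv[i]);
			continue;
		}
		/* ... per-file work would happen here ... */
		file_append_cleanup();	/* appended-data teardown */
		mmap_cleanup();		/* mapping teardown */
	}
	return 0;
}

Keeping the two teardown paths separate like this gives the append buffer
(grown in uwrite()) and the mapping (created in mmap_file()) independent
lifetimes, which is what the split in this patch is after.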