Skip to content

Commit

Permalink
Merge branch 'master' into i#7199-make-adding-registers-easier
Browse files Browse the repository at this point in the history
  • Loading branch information
philramsey-arm authored Jan 24, 2025
2 parents 7cd3e6b + bdeedf9 commit 8fb15b7
Show file tree
Hide file tree
Showing 25 changed files with 343 additions and 76 deletions.
3 changes: 3 additions & 0 deletions api/docs/release.dox
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,9 @@ changes:
- The size of #dr_mcontext_t on 32-bit Arm has been increased by 4 so that
the struct can be pushed onto the 8-byte aligned stack without additional
padding. The offset of the field "simd" has changed.
- Added new fields elf_path and elf_path_size to dr_memory_dump_spec_t. When
dr_create_memory_dump() returns true and elf_path is not NULL, elf_path will be
written with the path to the memory dump file.

Further non-compatibility-affecting changes include:
- Added support for reading a single drmemtrace trace file from stdin
Expand Down
9 changes: 5 additions & 4 deletions clients/drcachesim/scheduler/scheduler.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/* **********************************************************
* Copyright (c) 2023-2024 Google, Inc. All rights reserved.
* Copyright (c) 2023-2025 Google, Inc. All rights reserved.
* **********************************************************/

/*
Expand Down Expand Up @@ -1162,9 +1162,10 @@ template <typename RecordType, typename ReaderType> class scheduler_tmpl_t {
* For #SCHEDULER_USE_INPUT_ORDINALS or
* #SCHEDULER_USE_SINGLE_INPUT_ORDINALS, returns the input stream ordinal, except
* for the case of a single combined-stream input with the passed-in thread id
* set to INVALID_THREAD_ID (the serial analysis mode for analyzer tools) in
* which case the last trace record's tid is returned; otherwise returns the
* output stream ordinal.
* set to INVALID_THREAD_ID (the online analysis mode for analyzer tools where the
* inputs for multiple threads are all combined in one process-wide pipe) in which
* case the last trace record's tid as an ordinal (in the order observed in the
* output stream) is returned; otherwise returns the output stream ordinal.
*/
int
get_shard_index() const override;
Expand Down
16 changes: 11 additions & 5 deletions core/arch/mangle_shared.c
Original file line number Diff line number Diff line change
Expand Up @@ -1070,19 +1070,25 @@ mangle_syscall_code(dcontext_t *dcontext, fragment_t *f, byte *pc, bool skip)
/* jmps are right before syscall, but there can be nops to pad exit cti on x86 */
ASSERT(cti_pc == prev_pc - JMP_LONG_LENGTH);
ASSERT(skip_pc < cti_pc);
# ifdef ARM
ASSERT(cti_pc - skip_pc ==
(dr_get_isa_mode(dcontext) == DR_ISA_ARM_A32 ? JMP_LONG_LENGTH
: JMP_SHORT_LENGTH));
# else
ASSERT(
skip_pc ==
cti_pc -
JMP_SHORT_LENGTH IF_X86(|| *(cti_pc - JMP_SHORT_LENGTH) == RAW_OPCODE_nop));
# endif
instr_reset(dcontext, &instr);
pc = decode(dcontext, skip_pc, &instr);
ASSERT(pc != NULL); /* our own code! */
# ifdef ARM
ASSERT(instr_get_opcode(&instr) ==
OP_jmp_short
/* For A32 it's not OP_b_short */
IF_ARM(||
(instr_get_opcode(&instr) == OP_jmp &&
opnd_get_pc(instr_get_target(&instr)) == pc + ARM_INSTR_SIZE)));
(dr_get_isa_mode(dcontext) == DR_ISA_ARM_A32 ? OP_b : OP_b_short));
# else
ASSERT(instr_get_opcode(&instr) == OP_jmp_short);
# endif
ASSERT(pc <= cti_pc); /* could be nops */
DOCHECK(1, {
pc = decode(dcontext, cti_pc, &cti);
Expand Down
60 changes: 40 additions & 20 deletions core/drlibc/drlibc_module_elf.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/* *******************************************************************************
* Copyright (c) 2012-2021 Google, Inc. All rights reserved.
* Copyright (c) 2012-2025 Google, Inc. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2008-2010 VMware, Inc. All rights reserved.
* *******************************************************************************/
Expand Down Expand Up @@ -380,7 +380,7 @@ app_pc
elf_loader_map_phdrs(elf_loader_t *elf, bool fixed, map_fn_t map_func,
unmap_fn_t unmap_func, prot_fn_t prot_func,
check_bounds_fn_t check_bounds_func, memset_fn_t memset_func,
modload_flags_t flags)
modload_flags_t flags, overlap_map_fn_t overlap_map_func)
{
app_pc lib_base, lib_end, last_end;
ELF_HEADER_TYPE *elf_hdr = elf->ehdr;
Expand Down Expand Up @@ -480,26 +480,46 @@ elf_loader_map_phdrs(elf_loader_t *elf, bool fixed, map_fn_t map_func,
do_mmap = false;
elf->image_size = last_end - lib_base;
}
/* XXX:
* This function can be called after dynamo_heap_initialized,
* and we will use map_file instead of os_map_file.
* However, map_file does not allow mmap with overlapped memory,
* so we have to unmap the old memory first.
* This might be a problem, e.g.
* one thread unmaps the memory and, before mapping the actual file,
* another thread's mmap request takes the memory here --
* a race condition.
*/
if (seg_size > 0) { /* i#1872: handle empty segments */
if (do_mmap) {
(*unmap_func)(seg_base, seg_size);
map = (*map_func)(
elf->fd, &seg_size, pg_offs, seg_base /* base */,
seg_prot | MEMPROT_WRITE /* prot */,
MAP_FILE_COPY_ON_WRITE /*writes should not change file*/ |
MAP_FILE_IMAGE |
/* we don't need MAP_FILE_REACHABLE b/c we're fixed */
MAP_FILE_FIXED);
if (overlap_map_func != NULL) {
/* The relevant part of the anonymous map obtained above is
* expected to automatically and atomically get unmapped because
* we use overlap_map_func (which requires MAP_FILE_FIXED).
*/
map = (*overlap_map_func)(
elf->fd, &seg_size, pg_offs, seg_base /* base */,
seg_prot | MEMPROT_WRITE /* prot */,
MAP_FILE_COPY_ON_WRITE /*writes should not change file*/ |
MAP_FILE_IMAGE |
/* we don't need MAP_FILE_REACHABLE b/c we're fixed */
MAP_FILE_FIXED);
} else {
/* TODO i#7192:
* This function can be called after dynamo_heap_initialized,
* and we will use d_r_map_file instead of os_map_file.
* However, d_r_map_file performs memory bookkeeping which needs
* to be first updated using an explicit d_r_unmap_file operation.
*
* This might be a problem: e.g., one thread unmaps the memory and,
* before mapping the actual file, another thread's mmap request
* takes the memory here -- a race condition. This can be
* solved by adding a new d_r_overlap_map_file that avoids
* actually unmapping the range and atomically replaces it with
* the new mapping using MAP_FIXED, and additionally performs the
* required bookkeeping. When available, specify
* d_r_overlap_map_file as the overlap_map_func in callers of this
* function that use d_r_map_file and d_r_unmap_file.
*/
(*unmap_func)(seg_base, seg_size);
map = (*map_func)(
elf->fd, &seg_size, pg_offs, seg_base /* base */,
seg_prot | MEMPROT_WRITE /* prot */,
MAP_FILE_COPY_ON_WRITE /*writes should not change file*/ |
MAP_FILE_IMAGE |
/* we don't need MAP_FILE_REACHABLE b/c we're fixed */
MAP_FILE_FIXED);
}
ASSERT(map != NULL);
/* fill zeros at extend size */
file_end = (app_pc)prog_hdr->p_vaddr + prog_hdr->p_filesz;
Expand Down
7 changes: 7 additions & 0 deletions core/ir/instr_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -2638,6 +2638,13 @@ enum {
* looking at machine state from the kernel, such as in a signal handler.
*/
EFLAGS_IT_SIZE = 0x06001c00,
# ifdef ARM
/**
* The bits in the CPSR register that are RES1: they are either
* hardwired to 1 or their value should be preserved.
*/
EFLAGS_RES1 = 0x00000010,
# endif
};

/** The bits in the CPSR register of the T32 IT block state. */
Expand Down
16 changes: 13 additions & 3 deletions core/lib/dr_tools.h
Original file line number Diff line number Diff line change
Expand Up @@ -357,20 +357,30 @@ typedef struct _dr_memory_dump_spec_t {
/** The type of memory dump requested. */
dr_memory_dump_flags_t flags;
/**
* This field only applies to DR_MEMORY_DUMP_LDMP. This string is
* This field only applies to #DR_MEMORY_DUMP_LDMP. This string is
* stored inside the ldmp as the reason for the dump.
*/
const char *label;
/**
* This field only applies to DR_MEMORY_DUMP_LDMP. This is an optional output
* This field only applies to #DR_MEMORY_DUMP_LDMP. This is an optional output
* field that, if non-NULL, will be written with the path to the created file.
*/
char *ldmp_path;
/**
* This field only applies to DR_MEMORY_DUMP_LDMP. This is the maximum size,
* This field only applies to #DR_MEMORY_DUMP_LDMP. This is the maximum size,
* in bytes, of ldmp_path.
*/
size_t ldmp_path_size;
/**
* This field only applies to #DR_MEMORY_DUMP_ELF. This is an optional output
* field that, if non-NULL, will be written with the path to the created file.
*/
char *elf_path;
/**
* This field only applies to #DR_MEMORY_DUMP_ELF. This is the maximum size,
* in bytes, of elf_path.
*/
size_t elf_path_size;
} dr_memory_dump_spec_t;

DR_API
Expand Down
6 changes: 6 additions & 0 deletions core/lib/globals_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -523,6 +523,12 @@ typedef struct _instr_t instr_t;
# define IF_NOT_ANDROID(x) x
#endif

#ifdef ANDROID32
# define IF_NOT_ANDROID32(x)
#else
# define IF_NOT_ANDROID32(x) x
#endif

#ifdef X64
# define IF_X64(x) x
# define IF_X64_ELSE(x, y) x
Expand Down
3 changes: 2 additions & 1 deletion core/lib/instrument.c
Original file line number Diff line number Diff line change
Expand Up @@ -2533,7 +2533,8 @@ dr_create_memory_dump(dr_memory_dump_spec_t *spec)
#elif defined(LINUX) && \
((defined(X64) && defined(X86)) || (defined(AARCH64) && !defined(ANDROID64)))
if (TEST(DR_MEMORY_DUMP_ELF, spec->flags)) {
return os_dump_core_live(get_thread_private_dcontext());
return os_dump_core_live(get_thread_private_dcontext(), spec->elf_path,
spec->elf_path_size);
}
#endif
return false;
Expand Down
9 changes: 6 additions & 3 deletions core/unix/coredump.c
Original file line number Diff line number Diff line change
Expand Up @@ -380,7 +380,7 @@ write_fpregset_note(DR_PARAM_IN dcontext_t *dcontext, DR_PARAM_IN priv_mcontext_
* false otherwise.
*/
static bool
os_dump_core_internal(dcontext_t *dcontext)
os_dump_core_internal(dcontext_t *dcontext, char *path DR_PARAM_OUT, size_t path_sz)
{
priv_mcontext_t mc;
if (!dr_get_mcontext_priv(dcontext, NULL, &mc))
Expand Down Expand Up @@ -477,6 +477,9 @@ os_dump_core_internal(dcontext_t *dcontext)
SYSLOG_INTERNAL_ERROR("Unable to open the core dump file.");
return false;
}
if (path != NULL) {
d_r_strncpy(path, dump_core_file_name, path_sz);
}
// We use two types of program headers. NOTE is used to store prstatus
// structure and floating point registers. LOAD is used to specify loadable
// segments. All but one section (shstrtab which stores section names)
Expand Down Expand Up @@ -650,7 +653,7 @@ os_dump_core_internal(dcontext_t *dcontext)
* Returns true if a core dump file is written, false otherwise.
*/
bool
os_dump_core_live(dcontext_t *dcontext)
os_dump_core_live(dcontext_t *dcontext, char *path DR_PARAM_OUT, size_t path_sz)
{
#ifdef DR_HOST_NOT_TARGET
// Memory dump is supported only when the host and the target are the same.
Expand All @@ -671,7 +674,7 @@ os_dump_core_live(dcontext_t *dcontext)
}

// TODO i#7046: Add support to save register values for all threads.
const bool ret = os_dump_core_internal(dcontext);
const bool ret = os_dump_core_internal(dcontext, path, path_sz);

end_synch_with_all_threads(threads, num_threads,
/*resume=*/true);
Expand Down
16 changes: 14 additions & 2 deletions core/unix/injector.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/* **********************************************************
* Copyright (c) 2012-2022 Google, Inc. All rights reserved.
* Copyright (c) 2012-2025 Google, Inc. All rights reserved.
* **********************************************************/

/*
Expand Down Expand Up @@ -1453,6 +1453,17 @@ injectee_unmap(byte *addr, size_t size)
return true;
}

static byte *
injectee_overlap_map_file(file_t f, size_t *size DR_PARAM_INOUT, uint64 offs, app_pc addr,
                          uint prot, map_flags_t map_flags)
{
    /* Overlap-mapping variant for the injectee: valid only for fixed-address
     * requests, where the caller wants the mapping exactly at addr and any
     * mapping already present there may be discarded.
     */
    ASSERT(TEST(MAP_FILE_FIXED, map_flags));
    byte *mapped = injectee_map_file(f, size, offs, addr, prot, map_flags);
    return mapped;
}

/* Do an mprotect syscall in the injectee. */
static bool
injectee_prot(byte *addr, size_t size, uint prot /*MEMPROT_*/)
Expand Down Expand Up @@ -1784,7 +1795,8 @@ inject_ptrace(dr_inject_info_t *info, const char *library_path)
injectee_dr_fd = dr_fd;
injected_base = elf_loader_map_phdrs(
&loader, true /*fixed*/, injectee_map_file, injectee_unmap, injectee_prot, NULL,
injectee_memset, MODLOAD_SEPARATE_PROCESS /*!reachable*/);
injectee_memset, MODLOAD_SEPARATE_PROCESS /*!reachable*/,
injectee_overlap_map_file);
if (injected_base == NULL) {
if (verbose)
fprintf(stderr, "Unable to mmap libdynamorio.so in injectee\n");
Expand Down
51 changes: 44 additions & 7 deletions core/unix/loader.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/* *******************************************************************************
* Copyright (c) 2011-2024 Google, Inc. All rights reserved.
* Copyright (c) 2011-2025 Google, Inc. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* *******************************************************************************/

Expand Down Expand Up @@ -484,6 +484,33 @@ privload_check_new_map_bounds(elf_loader_t *elf, byte *map_base, byte *map_end)
}
#endif

#ifdef LINUX
/* XXX i#7192: Consider making this an os.h API, like the related os_map_file and
* os_unmap_file.
*/
static byte *
overlap_map_file_func(file_t f, size_t *size DR_PARAM_INOUT, uint64 offs, app_pc addr,
                      uint prot, map_flags_t map_flags)
{
    /* Maps the file on top of an existing reservation at a fixed address.
     * Valid only for fixed-address requests: the caller wants the mapping
     * exactly at addr and accepts that any mapping already there is replaced.
     */
    ASSERT(TEST(MAP_FILE_FIXED, map_flags));
    bool within_vmm =
        DYNAMO_OPTION(vm_reserve) && is_vmm_reserved_address(addr, *size, NULL, NULL);
    if (within_vmm) {
        /* The reserved range came from our vmm, so call os_unmap_file first
         * to keep our heap bookkeeping consistent.  In this case it performs
         * no munmap syscall, so there is no real unmap-then-map race window
         * with other threads.
         */
        os_unmap_file(addr, *size);
    }
    /* MAP_FILE_FIXED (MAP_FIXED in the mmap syscall) makes the kernel
     * atomically and automatically unmap whatever overlaps the target range.
     */
    return os_map_file(f, size, offs, addr, prot, map_flags);
}
#endif

/* This only maps, as relocation for ELF requires processing imports first,
* which we have to delay at init time at least.
*/
Expand All @@ -494,6 +521,7 @@ privload_map_and_relocate(const char *filename, size_t *size DR_PARAM_OUT,
#ifdef LINUX
map_fn_t map_func;
unmap_fn_t unmap_func;
overlap_map_fn_t overlap_map_func;
prot_fn_t prot_func;
app_pc base = NULL;
elf_loader_t loader;
Expand All @@ -506,10 +534,17 @@ privload_map_and_relocate(const char *filename, size_t *size DR_PARAM_OUT,
if (dynamo_heap_initialized && !standalone_library) {
map_func = d_r_map_file;
unmap_func = d_r_unmap_file;
/* TODO i#7192: Implement a new d_r_overlap_map_file that performs
* remapping similar to overlap_map_file_func (using just a map call with
* MAP_FIXED, but without any explicit unmap) but also does the required
* bookkeeping.
*/
overlap_map_func = NULL;
prot_func = set_protection;
} else {
map_func = os_map_file;
unmap_func = os_unmap_file;
overlap_map_func = overlap_map_file_func;
prot_func = os_set_protection;
}

Expand All @@ -535,7 +570,7 @@ privload_map_and_relocate(const char *filename, size_t *size DR_PARAM_OUT,
}
base = elf_loader_map_phdrs(&loader, false /* fixed */, map_func, unmap_func,
prot_func, privload_check_new_map_bounds, memset,
privload_map_flags(flags));
privload_map_flags(flags), overlap_map_func);
if (base != NULL) {
if (size != NULL)
*size = loader.image_size;
Expand Down Expand Up @@ -2091,9 +2126,10 @@ reload_dynamorio(void **init_sp, app_pc conflict_start, app_pc conflict_end)
}

/* Now load the 2nd libdynamorio.so */
dr_map = elf_loader_map_phdrs(&dr_ld, false /*!fixed*/, os_map_file, os_unmap_file,
os_set_protection, privload_check_new_map_bounds,
memset, privload_map_flags(0 /*!reachable*/));
dr_map =
elf_loader_map_phdrs(&dr_ld, false /*!fixed*/, os_map_file, os_unmap_file,
os_set_protection, privload_check_new_map_bounds, memset,
privload_map_flags(0 /*!reachable*/), overlap_map_file_func);
ASSERT(dr_map != NULL);
ASSERT(is_elf_so_header(dr_map, 0));

Expand Down Expand Up @@ -2251,7 +2287,8 @@ privload_early_inject(void **sp, byte *old_libdr_base, size_t old_libdr_size)
/* ensure there's space for the brk */
map_exe_file_and_brk, os_unmap_file, os_set_protection,
privload_check_new_map_bounds, memset,
privload_map_flags(MODLOAD_IS_APP /*!reachable*/));
privload_map_flags(MODLOAD_IS_APP /*!reachable*/),
NULL /*overlap_map_func*/);
apicheck(exe_map != NULL,
"Failed to load application. "
"Check path and architecture.");
Expand Down Expand Up @@ -2311,7 +2348,7 @@ privload_early_inject(void **sp, byte *old_libdr_base, size_t old_libdr_size)
interp_map = elf_loader_map_phdrs(
&interp_ld, false /* fixed */, os_map_file, os_unmap_file, os_set_protection,
privload_check_new_map_bounds, memset,
privload_map_flags(MODLOAD_IS_APP /*!reachable*/));
privload_map_flags(MODLOAD_IS_APP /*!reachable*/), overlap_map_file_func);
apicheck(interp_map != NULL && is_elf_so_header(interp_map, 0),
"Failed to map ELF interpreter.");
/* On Android, the system loader /system/bin/linker sets itself
Expand Down
Loading

0 comments on commit 8fb15b7

Please sign in to comment.