|
| 1 | +// Copyright 2019 King's College London. |
| 2 | +// Created by the Software Development Team <http://soft-dev.org/>. |
| 3 | +// |
| 4 | +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
| 5 | +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
| 6 | +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
| 7 | +// option. This file may not be copied, modified, or distributed |
| 8 | +// except according to those terms. |
| 9 | + |
| 10 | +#include <stdint.h> |
| 11 | +#include <stdlib.h> |
| 12 | +#include <err.h> |
| 13 | +#include <stdbool.h> |
| 14 | +#include <stdatomic.h> |
| 15 | + |
// A single MIR (mid-level IR) location recorded into the trace.
// Identifies a basic block by (crate, definition, basic-block) indices.
struct mir_loc {
    uint64_t crate_hash; // Hash identifying the crate.
    uint32_t def_idx;    // Index of the definition within the crate.
    uint32_t bb_idx;     // Index of the basic block within the definition.
};
| 21 | + |
| 22 | +#define TL_TRACE_INIT_CAP 1024 |
| 23 | +#define TL_TRACE_REALLOC_CAP 1024 |
| 24 | + |
| 25 | +void yk_swt_start_tracing_impl(void); |
| 26 | +void yk_swt_rec_loc_impl(uint64_t crate_hash, uint32_t def_idx, uint32_t bb_idx); |
| 27 | +struct mir_loc *yk_swt_stop_tracing_impl(size_t *ret_trace_len); |
| 28 | +void yk_swt_invalidate_trace_impl(void); |
| 29 | + |
// The trace buffer. Allocated by `yk_swt_start_tracing_impl()`; ownership is
// transferred to the caller of `yk_swt_stop_tracing_impl()` (who must free it).
static __thread struct mir_loc *trace_buf = NULL;
// The number of elements in the trace buffer.
static __thread size_t trace_buf_len = 0;
// The allocation capacity of the trace buffer (in elements).
static __thread size_t trace_buf_cap = 0;
// Is the current thread tracing?
// true = we are tracing, false = we are not tracing or an error occurred.
// NOTE(review): `volatile` is redundant on an `_Atomic` object used only for
// inter-thread signalling — atomics alone suffice; confirm no MMIO intent
// before removing.
static __thread volatile atomic_bool tracing = false;
| 39 | + |
| 40 | +// Start tracing on the current thread. |
| 41 | +// A new trace buffer is allocated and MIR locations will be written into it on |
| 42 | +// subsequent calls to `yk_swt_rec_loc_impl`. If the current thread is already |
| 43 | +// tracing, calling this will lead to undefined behaviour. |
| 44 | +void |
| 45 | +yk_swt_start_tracing_impl(void) { |
| 46 | + trace_buf = calloc(TL_TRACE_INIT_CAP, sizeof(struct mir_loc)); |
| 47 | + if (trace_buf == NULL) { |
| 48 | + err(EXIT_FAILURE, "%s: calloc: ", __func__); |
| 49 | + } |
| 50 | + |
| 51 | + trace_buf_cap = TL_TRACE_INIT_CAP; |
| 52 | + atomic_store_explicit(&tracing, true, memory_order_relaxed); |
| 53 | +} |
| 54 | + |
| 55 | +// Record a location into the trace buffer if tracing is enabled on the current thread. |
| 56 | +void |
| 57 | +yk_swt_rec_loc_impl(uint64_t crate_hash, uint32_t def_idx, uint32_t bb_idx) |
| 58 | +{ |
| 59 | + if (!atomic_load_explicit(&tracing, memory_order_relaxed)) { |
| 60 | + return; |
| 61 | + } |
| 62 | + |
| 63 | + // Check if we need more space and reallocate if necessary. |
| 64 | + if (trace_buf_len == trace_buf_cap) { |
| 65 | + if (trace_buf_cap >= SIZE_MAX - TL_TRACE_REALLOC_CAP) { |
| 66 | + // Trace capacity would overflow. |
| 67 | + atomic_store_explicit(&tracing, false, memory_order_relaxed); |
| 68 | + return; |
| 69 | + } |
| 70 | + size_t new_cap = trace_buf_cap + TL_TRACE_REALLOC_CAP; |
| 71 | + |
| 72 | + if (new_cap > SIZE_MAX / sizeof(struct mir_loc)) { |
| 73 | + // New buffer size would overflow. |
| 74 | + atomic_store_explicit(&tracing, false, memory_order_relaxed); |
| 75 | + return; |
| 76 | + } |
| 77 | + size_t new_size = new_cap * sizeof(struct mir_loc); |
| 78 | + |
| 79 | + trace_buf = realloc(trace_buf, new_size); |
| 80 | + if (trace_buf == NULL) { |
| 81 | + atomic_store_explicit(&tracing, false, memory_order_relaxed); |
| 82 | + return; |
| 83 | + } |
| 84 | + |
| 85 | + trace_buf_cap = new_cap; |
| 86 | + } |
| 87 | + |
| 88 | + struct mir_loc loc = { crate_hash, def_idx, bb_idx }; |
| 89 | + trace_buf[trace_buf_len] = loc; |
| 90 | + trace_buf_len ++; |
| 91 | +} |
| 92 | + |
| 93 | + |
| 94 | +// Stop tracing on the current thread. |
| 95 | +// On success the trace buffer is returned and the number of locations it |
| 96 | +// holds is written to `*ret_trace_len`. It is the responsibility of the caller |
| 97 | +// to free the returned trace buffer. A NULL pointer is returned on error. |
| 98 | +// Calling this function when tracing was not started with |
| 99 | +// `yk_swt_start_tracing_impl()` results in undefined behaviour. |
| 100 | +struct mir_loc * |
| 101 | +yk_swt_stop_tracing_impl(size_t *ret_trace_len) { |
| 102 | + if (!atomic_load_explicit(&tracing, memory_order_relaxed)) { |
| 103 | + free(trace_buf); |
| 104 | + trace_buf = NULL; |
| 105 | + trace_buf_len = 0; |
| 106 | + } |
| 107 | + |
| 108 | + // We hand ownership of the trace to Rust now. Rust is responsible for |
| 109 | + // freeing the trace. |
| 110 | + struct mir_loc *ret_trace = trace_buf; |
| 111 | + *ret_trace_len = trace_buf_len; |
| 112 | + |
| 113 | + // Now reset all off the recorder's state. |
| 114 | + // We reset `trace_invalid` when tracing is restarted, because signals |
| 115 | + // handlers which set this flag may arrive in the meantime. |
| 116 | + trace_buf = NULL; |
| 117 | + tracing = false; |
| 118 | + trace_buf_len = 0; |
| 119 | + trace_buf_cap = 0; |
| 120 | + |
| 121 | + return ret_trace; |
| 122 | +} |
| 123 | + |
| 124 | +// Call this to safely mark the trace invalid. |
// Call this to safely mark the trace invalid.
// Disabling `tracing` stops further recording; `yk_swt_stop_tracing_impl()`
// then observes the cleared flag, frees the partial buffer and returns NULL.
void
yk_swt_invalidate_trace_impl(void) {
    // We don't free the trace buffer here, as this may be called in a signal
    // handler and thus needs to be reentrant (free() is not async-signal-safe;
    // a lock-free atomic store is).
    atomic_store_explicit(&tracing, false, memory_order_relaxed);
}
0 commit comments