// This file is a part of Julia. License is MIT: https://julialang.org/license

#include "gc.h"
#ifndef _OS_WINDOWS_
#  include <sys/resource.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

// Try to allocate memory in chunks to permit faster allocation
// and improve memory locality of the pools
#ifdef _P64
#define DEFAULT_BLOCK_PG_ALLOC (4096) // 64 MB
#else
#define DEFAULT_BLOCK_PG_ALLOC (1024) // 16 MB
#endif
#define MIN_BLOCK_PG_ALLOC (1) // 16 KB

static int block_pg_cnt = DEFAULT_BLOCK_PG_ALLOC;
static size_t current_pg_count = 0;

void jl_gc_init_page(void)
{
    if (GC_PAGE_SZ * block_pg_cnt < jl_page_size)
        block_pg_cnt = jl_page_size / GC_PAGE_SZ; // exact division
}

#ifndef MAP_NORESERVE // not defined in POSIX, FreeBSD, etc.
#define MAP_NORESERVE (0)
#endif

// Try to allocate a memory block for multiple pages
// Return `NULL` if allocation failed. Result is aligned to `GC_PAGE_SZ`.
static char *jl_gc_try_alloc_pages(int pg_cnt) JL_NOTSAFEPOINT
{
    size_t pages_sz = GC_PAGE_SZ * pg_cnt;
#ifdef _OS_WINDOWS_
    char *mem = (char*)VirtualAlloc(NULL, pages_sz + GC_PAGE_SZ,
                                    MEM_RESERVE, PAGE_READWRITE);
    if (mem == NULL)
        return NULL;
#else
    if (GC_PAGE_SZ > jl_page_size)
        pages_sz += GC_PAGE_SZ;
    char *mem = (char*)mmap(0, pages_sz, PROT_READ | PROT_WRITE,
                            MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem == MAP_FAILED)
        return NULL;
#endif
    if (GC_PAGE_SZ > jl_page_size)
        // round data pointer up to the nearest gc_page_data-aligned
        // boundary if mmap didn't already do so.
        mem = (char*)gc_page_data(mem + GC_PAGE_SZ - 1);
    return mem;
}
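When GC_PAGE_SZ exceeds the OS page size, jl_gc_try_alloc_pages above over-allocates by one GC page and rounds the returned pointer up to the next GC_PAGE_SZ boundary (the gc_page_data call). Below is a minimal standalone sketch of that round-up arithmetic, not part of gc-pages.c: align_up is a hypothetical helper standing in for the real gc_page_data macro, and it assumes the alignment is a power of two (as GC_PAGE_SZ is).

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: round addr up to the next multiple of align
 * (align must be a power of two). */
static inline uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
    return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
    uintptr_t align = (uintptr_t)1 << 14;  /* e.g. a 16 KB GC page */
    uintptr_t raw   = 0x10000123u;         /* pretend mmap returned this address */
    uintptr_t mem   = align_up(raw, align);
    assert((mem & (align - 1)) == 0);      /* now GC-page aligned */
    assert(mem - raw < align);             /* fits in the extra GC_PAGE_SZ of slack */
    return 0;
}

Because one extra GC page of slack was requested up front, the rounded-up pointer always stays inside the mapping, which is why the real code can discard the unaligned prefix without bookkeeping.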
// Allocate the memory for a new page. Starts with `block_pg_cnt` pages. On
// failure the request is shrunk 4x at a time, so that there is enough space
// left for a few more chunks (or other allocations). The final page count is
// recorded and will be used as the starting count next time. If the page
// count drops below `MIN_BLOCK_PG_ALLOC`, a `jl_memory_exception` is thrown.
// Assumes `gc_perm_lock` is acquired; the lock is released before the
// exception is thrown.
static jl_gc_pagemeta_t *jl_gc_alloc_new_page(void) JL_NOTSAFEPOINT
{
    // try to allocate a large block of memory (or a small one)
    unsigned pg, pg_cnt = block_pg_cnt;
    char *mem = NULL;
    while (1) {
        if (__likely((mem = jl_gc_try_alloc_pages(pg_cnt))))
            break;
        size_t min_block_pg_alloc = MIN_BLOCK_PG_ALLOC;
        if (GC_PAGE_SZ * min_block_pg_alloc < jl_page_size)
            min_block_pg_alloc = jl_page_size / GC_PAGE_SZ; // exact division
        if (pg_cnt >= 4 * min_block_pg_alloc) {
            pg_cnt /= 4;
            block_pg_cnt = pg_cnt;
        }
        else if (pg_cnt > min_block_pg_alloc) {
            block_pg_cnt = pg_cnt = min_block_pg_alloc;
        }
        else {
            uv_mutex_unlock(&gc_perm_lock);
            jl_throw(jl_memory_exception);
        }
    }

    // now we need to insert these pages into the pagetable metadata;
    // if any allocation fails, this just stops recording more pages from that point
    // and will free (munmap) the remainder
    jl_gc_pagemeta_t *page_meta =
        (jl_gc_pagemeta_t*)jl_gc_perm_alloc_nolock(pg_cnt * sizeof(jl_gc_pagemeta_t), 1,
                                                   sizeof(void*), 0);
    pg = 0;
    if (page_meta) {
        for (; pg < pg_cnt; pg++) {
            struct jl_gc_metadata_ext info;
            uint32_t msk;
            unsigned i;
            pagetable1_t **ppagetable1;
            pagetable0_t **ppagetable0;
            jl_gc_pagemeta_t **pmeta;

            char *ptr = mem + (GC_PAGE_SZ * pg);
            page_meta[pg].data = ptr;

            // create & store the level 2 / outermost info
            i = REGION_INDEX(ptr);
            info.pagetable_i = i % 32;
            info.pagetable_i32 = i / 32;
            msk = (1u << info.pagetable_i);
            if ((memory_map.freemap1[info.pagetable_i32] & msk) == 0)
                memory_map.freemap1[info.pagetable_i32] |= msk; // has free
            info.pagetable1 = *(ppagetable1 = &memory_map.meta1[i]);
            if (!info.pagetable1) {
                info.pagetable1 = (pagetable1_t*)jl_gc_perm_alloc_nolock(sizeof(pagetable1_t), 1,
                                                                         sizeof(void*), 0);
                *ppagetable1 = info.pagetable1;
                if (!info.pagetable1)
                    break;
            }

            // create & store the level 1 info
            i = REGION1_INDEX(ptr);
            info.pagetable1_i = i % 32;
            info.pagetable1_i32 = i / 32;
            msk = (1u << info.pagetable1_i);
            if ((info.pagetable1->freemap0[info.pagetable1_i32] & msk) == 0)
                info.pagetable1->freemap0[info.pagetable1_i32] |= msk; // has free
            info.pagetable0 = *(ppagetable0 = &info.pagetable1->meta0[i]);
            if (!info.pagetable0) {
                info.pagetable0 = (pagetable0_t*)jl_gc_perm_alloc_nolock(sizeof(pagetable0_t), 1,
                                                                         sizeof(void*), 0);
                *ppagetable0 = info.pagetable0;
                if (!info.pagetable0)
                    break;
            }

            // create & store the level 0 / page info
            i = REGION0_INDEX(ptr);
            info.pagetable0_i = i % 32;
            info.pagetable0_i32 = i / 32;
            msk = (1u << info.pagetable0_i);
            info.pagetable0->freemap[info.pagetable0_i32] |= msk; // is free
            pmeta = &info.pagetable0->meta[i];
            info.meta = (*pmeta = &page_meta[pg]);
        }
    }

    if (pg < pg_cnt) {
#ifndef _OS_WINDOWS_
        // Trim the allocation to cover only the region for which we
        // successfully created the metadata. The Windows kernel does not
        // support this, so there we skip it and simply lose those virtual
        // addresses.
        munmap(mem + LLT_ALIGN(GC_PAGE_SZ * pg, jl_page_size),
               GC_PAGE_SZ * pg_cnt - LLT_ALIGN(GC_PAGE_SZ * pg, jl_page_size));
#endif
        if (pg == 0) {
            uv_mutex_unlock(&gc_perm_lock);
            jl_throw(jl_memory_exception);
        }
    }
    return page_meta;
}

// get a new page, either from the freemap
// or from the kernel if none are available
NOINLINE jl_gc_pagemeta_t *jl_gc_alloc_page(void) JL_NOTSAFEPOINT
{
    struct jl_gc_metadata_ext info;
    uv_mutex_lock(&gc_perm_lock);

    int last_errno = errno;
#ifdef _OS_WINDOWS_
    DWORD last_error = GetLastError();
#endif
    // scan over memory_map page-table for existing allocated but unused pages
    for (info.pagetable_i32 = memory_map.lb; info.pagetable_i32 < (REGION2_PG_COUNT + 31) / 32; info.pagetable_i32++) {
        uint32_t freemap1 = memory_map.freemap1[info.pagetable_i32];
        for (info.pagetable_i = 0; freemap1; info.pagetable_i++, freemap1 >>= 1) {
            unsigned next = ffs_u32(freemap1);
            info.pagetable_i += next;
            freemap1 >>= next;
            info.pagetable1 = memory_map.meta1[info.pagetable_i + info.pagetable_i32 * 32];
            // repeat over page-table level 1
            for (info.pagetable1_i32 = info.pagetable1->lb; info.pagetable1_i32 < REGION1_PG_COUNT / 32; info.pagetable1_i32++) {
                uint32_t freemap0 = info.pagetable1->freemap0[info.pagetable1_i32];
                for (info.pagetable1_i = 0; freemap0; info.pagetable1_i++, freemap0 >>= 1) {
                    unsigned next = ffs_u32(freemap0);
                    info.pagetable1_i += next;
                    freemap0 >>= next;
                    info.pagetable0 = info.pagetable1->meta0[info.pagetable1_i + info.pagetable1_i32 * 32];
                    // repeat over page-table level 0
                    for (info.pagetable0_i32 = info.pagetable0->lb; info.pagetable0_i32 < REGION0_PG_COUNT / 32; info.pagetable0_i32++) {
                        uint32_t freemap = info.pagetable0->freemap[info.pagetable0_i32];
                        if (freemap) {
                            info.pagetable0_i = ffs_u32(freemap);
                            info.meta = info.pagetable0->meta[info.pagetable0_i + info.pagetable0_i32 * 32];
                            assert(info.meta->data);
                            // new pages available starting at min of lb and pagetable_i32
                            if (memory_map.lb < info.pagetable_i32)
                                memory_map.lb = info.pagetable_i32;
                            if (info.pagetable1->lb < info.pagetable1_i32)
                                info.pagetable1->lb = info.pagetable1_i32;
                            if (info.pagetable0->lb < info.pagetable0_i32)
                                info.pagetable0->lb = info.pagetable0_i32;
                            goto have_free_page; // break out of all of these loops
                        }
                    }
                    info.pagetable1->freemap0[info.pagetable1_i32] &= ~(uint32_t)(1u << info.pagetable1_i); // record that this was full
                }
            }
            memory_map.freemap1[info.pagetable_i32] &= ~(uint32_t)(1u << info.pagetable_i); // record that this was full
        }
    }

    // no existing pages found, allocate a new one
    {
        jl_gc_pagemeta_t *meta = jl_gc_alloc_new_page();
        info = page_metadata_ext(meta->data);
        assert(meta == info.meta);
        // new pages are now available starting at max of lb and pagetable_i32
        if (memory_map.lb > info.pagetable_i32)
            memory_map.lb = info.pagetable_i32;
        if (info.pagetable1->lb > info.pagetable1_i32)
            info.pagetable1->lb = info.pagetable1_i32;
        if (info.pagetable0->lb > info.pagetable0_i32)
            info.pagetable0->lb = info.pagetable0_i32;
    }

have_free_page:
    // in-use pages are now ending at min of ub and pagetable_i32
    if (memory_map.ub < info.pagetable_i32)
        memory_map.ub = info.pagetable_i32;
    if (info.pagetable1->ub < info.pagetable1_i32)
        info.pagetable1->ub = info.pagetable1_i32;
    if (info.pagetable0->ub < info.pagetable0_i32)
        info.pagetable0->ub = info.pagetable0_i32;

    // mark this entry as in-use and not free
    info.pagetable0->freemap[info.pagetable0_i32] &= ~(uint32_t)(1u << info.pagetable0_i);
    info.pagetable0->allocmap[info.pagetable0_i32] |= (uint32_t)(1u << info.pagetable0_i);
    info.pagetable1->allocmap0[info.pagetable1_i32] |= (uint32_t)(1u << info.pagetable1_i);
    memory_map.allocmap1[info.pagetable_i32] |= (uint32_t)(1u << info.pagetable_i);

#ifdef _OS_WINDOWS_
    VirtualAlloc(info.meta->data, GC_PAGE_SZ, MEM_COMMIT, PAGE_READWRITE);
    SetLastError(last_error);
#endif
    errno = last_errno;
    current_pg_count++;
    gc_final_count_page(current_pg_count);
    uv_mutex_unlock(&gc_perm_lock);
    return info.meta;
}

// return a page to the freemap allocator
void jl_gc_free_page(void *p) JL_NOTSAFEPOINT
{
    // update the allocmap and freemap to indicate this contains a free entry
    struct jl_gc_metadata_ext info = page_metadata_ext(p);
    uint32_t msk;
    msk = (uint32_t)(1u << info.pagetable0_i);
    assert(!(info.pagetable0->freemap[info.pagetable0_i32] & msk));
    assert(info.pagetable0->allocmap[info.pagetable0_i32] & msk);
    info.pagetable0->allocmap[info.pagetable0_i32] &= ~msk;
    info.pagetable0->freemap[info.pagetable0_i32] |= msk;

    msk = (uint32_t)(1u << info.pagetable1_i);
    assert(info.pagetable1->allocmap0[info.pagetable1_i32] & msk);
    if ((info.pagetable1->freemap0[info.pagetable1_i32] & msk) == 0)
        info.pagetable1->freemap0[info.pagetable1_i32] |= msk;

    msk = (uint32_t)(1u << info.pagetable_i);
    assert(memory_map.allocmap1[info.pagetable_i32] & msk);
    if ((memory_map.freemap1[info.pagetable_i32] & msk) == 0)
        memory_map.freemap1[info.pagetable_i32] |= msk;

    free(info.meta->ages);
    info.meta->ages = NULL;

    // tell the OS we don't need these pages right now
    size_t decommit_size = GC_PAGE_SZ;
    if (GC_PAGE_SZ < jl_page_size) {
        // make sure we don't release more memory than intended
        size_t n_pages = jl_page_size / GC_PAGE_SZ; // exact division
        decommit_size = jl_page_size;
        void *otherp = (void*)((uintptr_t)p & ~(jl_page_size - 1)); // round down to the nearest physical page
        p = otherp;
        while (n_pages--) {
            struct jl_gc_metadata_ext info = page_metadata_ext(otherp);
            msk = (uint32_t)(1u << info.pagetable0_i);
            if (info.pagetable0->allocmap[info.pagetable0_i32] & msk)
                goto no_decommit;
            otherp = (void*)((char*)otherp + GC_PAGE_SZ);
        }
    }
#ifdef _OS_WINDOWS_
    VirtualFree(p, decommit_size, MEM_DECOMMIT);
#else
    madvise(p, decommit_size, MADV_DONTNEED);
#endif

no_decommit:
    // new pages are now available starting at max of lb and pagetable_i32
    if (memory_map.lb > info.pagetable_i32)
        memory_map.lb = info.pagetable_i32;
    if (info.pagetable1->lb > info.pagetable1_i32)
        info.pagetable1->lb = info.pagetable1_i32;
    if (info.pagetable0->lb > info.pagetable0_i32)
        info.pagetable0->lb = info.pagetable0_i32;
    current_pg_count--;
}

#ifdef __cplusplus
}
#endif
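The page search in jl_gc_alloc_page above reduces to a find-first-set walk over 32-bit freemap words, repeated across three pagetable levels and bounded by the lb/ub hints. Here is a minimal single-level sketch of that bitmap technique, standalone and not part of gc-pages.c; toy_freemap and toy_find_free_page are hypothetical names, and __builtin_ctz (GCC/Clang) plays the role of ffs_u32.

#include <stdint.h>
#include <stdio.h>

#define TOY_GROUPS 4   /* 4 * 32 = 128 page slots */

static uint32_t toy_freemap[TOY_GROUPS]; /* bit set = slot is free */

/* Return the global index of a free slot and mark it in use, or -1 if none. */
static int toy_find_free_page(void)
{
    for (int i32 = 0; i32 < TOY_GROUPS; i32++) {
        uint32_t freemap = toy_freemap[i32];
        if (freemap) {
            int i = __builtin_ctz(freemap);  /* index of lowest set bit, like ffs_u32 */
            toy_freemap[i32] &= ~(1u << i);  /* clear the bit: slot is now in use */
            return i32 * 32 + i;
        }
    }
    return -1; /* no free slot: the real allocator would mmap a new block here */
}

int main(void)
{
    toy_freemap[1] = 0x00000900;           /* slots 40 and 43 are free */
    printf("%d\n", toy_find_free_page());  /* prints 40 */
    printf("%d\n", toy_find_free_page());  /* prints 43 */
    printf("%d\n", toy_find_free_page());  /* prints -1 */
    return 0;
}

Clearing the bit on allocation and setting it again when the page is returned mirrors what jl_gc_alloc_page and jl_gc_free_page do to the freemap/allocmap words at each pagetable level.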