static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);

  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free.  */
  e->key = tcache;

  e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}

static __always_inline void *
tcache_get (size_t tc_idx)
{
  tcache_entry *e = tcache->entries[tc_idx];
  if (__glibc_unlikely (!aligned_OK (e)))
    malloc_printerr ("malloc(): unaligned tcache chunk detected");
  tcache->entries[tc_idx] = REVEAL_PTR (e->next);
  --(tcache->counts[tc_idx]);
  e->key = NULL;
  return (void *) e;
}
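/* For context: a paraphrased sketch of the double-free test in
   _int_free that the e->key marker above enables.  This approximates
   the glibc logic and is not a verbatim quote; exact details vary
   across glibc versions.  */
if (__glibc_unlikely (e->key == tcache))
  {
    /* A matching key could be a coincidence (reused memory that
       happens to hold the tcache address), so scan the bin before
       aborting.  Note the scan itself demangles each next pointer
       with REVEAL_PTR.  */
    tcache_entry *tmp;
    for (tmp = tcache->entries[tc_idx]; tmp != NULL;
         tmp = REVEAL_PTR (tmp->next))
      if (tmp == e)
        malloc_printerr ("free(): double free detected in tcache 2");
  }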
if (SINGLE_THREAD_P)
  {
    /* Check that the top of the bin is not the record we are going to
       add (i.e., double free).  */
    if (__builtin_expect (old == p, 0))
      malloc_printerr ("double free or corruption (fasttop)");
    p->fd = PROTECT_PTR (&p->fd, old);
    *fb = p;
  }
else
  do
    {
      /* Check that the top of the bin is not the record we are going to
         add (i.e., double free).  */
      if (__builtin_expect (old == p, 0))
        malloc_printerr ("double free or corruption (fasttop)");
      old2 = old;
      p->fd = PROTECT_PTR (&p->fd, old);
    }
  while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
         != old2);
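/* For context: a paraphrased sketch of the matching fastbin pop in
   _int_malloc (single-threaded path).  The chunk address is validated
   for alignment before its mangled fd pointer is demangled with
   REVEAL_PTR; a value corrupted without knowledge of the ASLR-derived
   key will almost always demangle to a misaligned address and abort
   here.  Approximation, not a verbatim quote.  */
victim = *fb;
if (victim != NULL)
  {
    if (__glibc_unlikely (misaligned_chunk (victim)))
      malloc_printerr ("malloc(): unaligned fastbin chunk detected");
    *fb = REVEAL_PTR (victim->fd);
  }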
/* Safe-Linking:
Use randomness from ASLR (mmap_base) to protect single-linked lists
of Fast-Bins and TCache. That is, mask the "next" pointers of the
lists' chunks, and also perform allocation alignment checks on them.
This mechanism reduces the risk of pointer hijacking, as was done with
Safe-Unlinking in the double-linked lists of Small-Bins.
It assumes a minimum page size of 4096 bytes (12 bits). Systems with
larger pages provide less entropy, although the pointer mangling
   still works.  */
#define PROTECT_PTR(pos, ptr) \
((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
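
/* Minimal standalone demo of the mangling round trip (hypothetical
   example, not glibc code; the macros are copied from above so the
   program is self-contained).  The key is the storage slot's own
   address shifted right by the 12 page-offset bits, so it inherits
   ASLR entropy; mangling with the same position twice recovers the
   original pointer.  Requires __typeof, a GCC/Clang extension.  */
#include <stdio.h>
#include <stdlib.h>

#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)

int
main (void)
{
  void *next = malloc (32);   /* pointer we want to protect */
  void *slot;                 /* where the mangled value lives */

  /* Store the mangled pointer, keyed by the slot's own address.  */
  slot = PROTECT_PTR (&slot, next);
  printf ("mangled:  %p\n", slot);

  /* Demangling with the same position recovers the original.  */
  printf ("revealed: %p\n", REVEAL_PTR (slot));
  printf ("original: %p\n", next);

  free (next);
  return 0;
}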