Data structure
struct PACKED(sizeof(void*)) tls_ptr_sized_values {
  .......
  // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
  uint8_t* thread_local_start;
  // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
  // potentially better performance.
  uint8_t* thread_local_pos;
  uint8_t* thread_local_end;
  // Thread local limit is how much we can expand the thread local buffer to, it is greater or
  // equal to thread_local_end.
  uint8_t* thread_local_limit;
  // Number of objects allocated in this thread's TLAB.
  size_t thread_local_objects;
  .......
};
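Together, thread_local_pos and thread_local_end form a simple bump-pointer allocator. The standalone sketch below (not ART code; it only mirrors the fields that matter for the fast path) illustrates the idea:

#include <cstddef>
#include <cstdint>

// Standalone illustration of the bump-pointer fast path behind the TLAB fields.
// This struct only mirrors the relevant members; it is not the ART type above.
struct TlabSketch {
  uint8_t* thread_local_pos = nullptr;   // next free byte
  uint8_t* thread_local_end = nullptr;   // end of the currently usable buffer
  size_t thread_local_objects = 0;

  // Returns nullptr when the request does not fit; the real runtime would then
  // either expand the buffer toward thread_local_limit or fetch a new TLAB.
  void* TryAlloc(size_t bytes) {
    if (static_cast<size_t>(thread_local_end - thread_local_pos) < bytes) {
      return nullptr;
    }
    void* result = thread_local_pos;
    thread_local_pos += bytes;
    ++thread_local_objects;
    return result;
  }
};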
Creation
void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) {
  DCHECK_LE(start, end);
  DCHECK_LE(end, limit);
  tlsPtr_.thread_local_start = start;
  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
  tlsPtr_.thread_local_end = end;
  tlsPtr_.thread_local_limit = limit;
  tlsPtr_.thread_local_objects = 0;
}
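Note that end and limit can differ: the gap between them is headroom the space may later hand to the thread, letting the TLAB grow in place. A minimal sketch of such an expansion step (this is not the ART helper itself, although the runtime performs an equivalent adjustment for partial TLABs):

// Sketch only: grow the usable part of a TLAB toward thread_local_limit.
// end_ptr stands in for &tlsPtr_.thread_local_end in the real thread object.
bool TryExpandTlabSketch(uint8_t** end_ptr, uint8_t* limit, size_t bytes) {
  if (static_cast<size_t>(limit - *end_ptr) < bytes) {
    return false;  // Not enough headroom left behind the buffer.
  }
  *end_ptr += bytes;  // thread_local_end moves; thread_local_limit stays fixed.
  return true;
}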
How a TLAB is allocated for a Thread
The TLAB memory can be allocated from a BumpPointerSpace:
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
  bytes = RoundUp(bytes, kAlignment);
  if (!num_blocks_) {
    UpdateMainBlock();
  }
  uint8_t* storage = reinterpret_cast<uint8_t*>(
      AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
  if (LIKELY(storage != nullptr)) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
    header->size_ = bytes;  // Write out the block header.
    storage += sizeof(BlockHeader);
    ++num_blocks_;
  }
  return storage;
}
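AllocBlock only carves a block out of the space; turning that block into a TLAB is done by BumpPointerSpace::AllocNewTlab. The version below is a hedged paraphrase from memory of older ART sources (the lock name and exact parameter list vary across releases), shown to make the hand-off to Thread::SetTlab explicit:

// Hedged paraphrase, not verbatim ART code: details vary between versions.
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(self);  // Give back any previous TLAB first.
  uint8_t* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;  // The space could not satisfy the request.
  }
  // A BumpPointerSpace TLAB never grows in place, so end and limit coincide.
  self->SetTlab(start, start + bytes, start + bytes);
  return true;
}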
or from a RegionSpace:
bool RegionSpace::AllocNewTlab(Thread* self,
                               const size_t tlab_size,
                               size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self, /*reuse=*/ gc::Heap::kUsePartialTlabs);
  Region* r = nullptr;
  uint8_t* pos = nullptr;
  *bytes_tl_bulk_allocated = tlab_size;
  // First attempt to get a partially used TLAB, if available.
  if (tlab_size < kRegionSize) {
    // Fetch the largest partial TLAB. The multimap is ordered in decreasing
    // size.
    auto largest_partial_tlab = partial_tlabs_.begin();
    if (largest_partial_tlab != partial_tlabs_.end() && largest_partial_tlab->first >= tlab_size) {
      r = largest_partial_tlab->second;
      pos = r->End() - largest_partial_tlab->first;
      partial_tlabs_.erase(largest_partial_tlab);
      *bytes_tl_bulk_allocated -= r->Top() - pos;
    }
  }
  if (r == nullptr) {
    // Fallback to allocating an entire region as TLAB.
    r = AllocateRegion(/*for_evac=*/ false);
  }
  if (r != nullptr) {
    uint8_t* start = pos != nullptr ? pos : r->Begin();
    DCHECK_ALIGNED(start, kObjectAlignment);
    r->is_a_tlab_ = true;
    r->thread_ = self;
    r->SetTop(r->End());
    self->SetTlab(start, start + tlab_size, r->End());
    return true;
  }
  return false;
}
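The line *bytes_tl_bulk_allocated -= r->Top() - pos; keeps the bulk-allocation accounting honest when a partial TLAB is reused: the bytes between pos and the region's Top() were already charged when that part of the region was handed out as a TLAB earlier, so only the remainder of the new request is charged again. A standalone numeric sketch (all sizes are hypothetical, and it assumes Top() marks the end of the previously handed-out TLAB, which is how I read the revoke path):

// Standalone arithmetic sketch with made-up sizes; not ART code.
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  constexpr size_t KiB = 1024;
  static uint8_t region[256 * KiB];          // pretend this is one region
  uint8_t* end = region + sizeof(region);    // r->End()

  // Earlier, a 16 KiB TLAB starting at the region base was revoked after its
  // owner had used 10 KiB, so Top() records the old TLAB end and pos the old
  // thread_local_pos.
  uint8_t* top = region + 16 * KiB;          // r->Top()
  uint8_t* pos = region + 10 * KiB;          // old thread_local_pos

  size_t tlab_size = 16 * KiB;               // new request
  size_t bytes_tl_bulk_allocated = tlab_size;
  bytes_tl_bulk_allocated -= static_cast<size_t>(top - pos);  // 6 KiB already charged
  assert(bytes_tl_bulk_allocated == 10 * KiB);
  (void)end;
  return 0;
}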
Allocation
inline mirror::Object* Thread::AllocTlab(size_t bytes) {
  DCHECK_GE(TlabSize(), bytes);
  ++tlsPtr_.thread_local_objects;
  mirror::Object* ret = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  tlsPtr_.thread_local_pos += bytes;
  return ret;
}
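A caller therefore combines the two steps: try the current TLAB, and only when it is too small ask the space for a new one. The sketch below uses a hypothetical helper name to show that shape; the real fast path lives in the Heap allocation code and is more involved (it requests a larger tlab_size than the single object and can trigger a GC on failure):

// Hypothetical helper, not the actual Heap fast path; it only shows how
// TlabSize(), AllocNewTlab() and AllocTlab() fit together.
mirror::Object* AllocFromTlabSketch(Thread* self,
                                    gc::space::RegionSpace* space,
                                    size_t alloc_size,
                                    size_t* bytes_tl_bulk_allocated) {
  if (self->TlabSize() < alloc_size) {
    // Current TLAB is too small: request a fresh (or partially used) one.
    // The real code asks for a tlab_size larger than just alloc_size.
    if (!space->AllocNewTlab(self, alloc_size, bytes_tl_bulk_allocated)) {
      return nullptr;  // Space exhausted; the real heap would try a GC here.
    }
  }
  // Inside the TLAB an allocation is just a pointer bump.
  return self->AllocTlab(alloc_size);
}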