Overview
Inside ART's implementation of Thread, a table of function entry points is initialized. This initialization is platform specific; essentially it records the entry addresses of a number of key assembly routines. The Thread class contains a struct tls_ptr_sized_values member, whose fields interpreter_entrypoints, jni_entrypoints, portable_entrypoints and quick_entrypoints store these entry points by category. Compiled code reaches an entry point through a fixed offset from the Thread object, which is kept in a dedicated register. In the native code below, for example, the word at offset #292 from the Thread register r9 is loaded into lr and then called with blx; in other words, the code jumps to the pAllocObject entry point.
0x00001240: f8d9e124  ldr.w   lr, [r9, #292]  ; pAllocObject
0x00001244: 1c06      mov     r6, r0
0x00001246: 1c29      mov     r1, r5
0x00001248: 2007      movs    r0, #7
0x0000124a: 47f0      blx     lr
/art/runtime/thread.cc
void Thread::InitTlsEntryPoints() {
// Insert a placeholder so we can easily tell if we call an unimplemented entry point.
uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) +
sizeof(tlsPtr_.quick_entrypoints));
for (uintptr_t* it = begin; it != end; ++it) {
*it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
}
InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
&tlsPtr_.portable_entrypoints, &tlsPtr_.quick_entrypoints);
}
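The UnimplementedEntryPoint placeholder mentioned in the comment above is, in this version, just a function that aborts, so calling an entry point that was never filled in fails loudly. Roughly:
/art/runtime/thread.cc (sketch, not verbatim)
// Written into every entry-point slot before InitEntryPoints overwrites the real ones.
static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}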
Here we only care about the allocation part, namely the ResetQuickAllocEntryPoints call.
/art/runtime/arch/arm/entrypoints_init_arm.cc
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
// Interpreter
ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
// Portable
ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
// Alloc
ResetQuickAllocEntryPoints(qpoints);
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
qpoints->pCheckCast = art_quick_check_cast;
// DexCache
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
qpoints->pInitializeType = art_quick_initialize_type;
qpoints->pResolveString = art_quick_resolve_string;
// Field
qpoints->pSet32Instance = art_quick_set32_instance;
qpoints->pSet32Static = art_quick_set32_static;
qpoints->pSet64Instance = art_quick_set64_instance;
qpoints->pSet64Static = art_quick_set64_static;
qpoints->pSetObjInstance = art_quick_set_obj_instance;
qpoints->pSetObjStatic = art_quick_set_obj_static;
qpoints->pGet32Instance = art_quick_get32_instance;
qpoints->pGet64Instance = art_quick_get64_instance;
qpoints->pGetObjInstance = art_quick_get_obj_instance;
qpoints->pGet32Static = art_quick_get32_static;
qpoints->pGet64Static = art_quick_get64_static;
qpoints->pGetObjStatic = art_quick_get_obj_static;
// Array
qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
qpoints->pAputObject = art_quick_aput_obj;
qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
// JNI
qpoints->pJniMethodStart = JniMethodStart;
qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
qpoints->pJniMethodEnd = JniMethodEnd;
qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
// Locks
qpoints->pLockObject = art_quick_lock_object;
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
qpoints->pCmpgDouble = CmpgDouble;
qpoints->pCmpgFloat = CmpgFloat;
qpoints->pCmplDouble = CmplDouble;
qpoints->pCmplFloat = CmplFloat;
qpoints->pFmod = fmod;
qpoints->pL2d = __aeabi_l2d;
qpoints->pFmodf = fmodf;
qpoints->pL2f = __aeabi_l2f;
qpoints->pD2iz = __aeabi_d2iz;
qpoints->pF2iz = __aeabi_f2iz;
qpoints->pIdivmod = __aeabi_idivmod;
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
qpoints->pLdiv = __aeabi_ldivmod;
qpoints->pLmod = __aeabi_ldivmod;
// result returned in r2:r3
qpoints->pLmul = art_quick_mul_long;
qpoints->pShlLong = art_quick_shl_long;
qpoints->pShrLong = art_quick_shr_long;
qpoints->pUshrLong = art_quick_ushr_long;
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
qpoints->pStringCompareTo = art_quick_string_compareto;
qpoints->pMemcpy = memcpy;
// Invocation
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
// Throws
qpoints->pDeliverException = art_quick_deliver_exception;
qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
qpoints->pThrowDivZero = art_quick_throw_div_zero;
qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
};
entry_points_allocator is a static variable initialized to kAllocatorTypeDlMalloc, meaning the DlMalloc allocator entry points would be used by default. Other code can call SetQuickAllocEntryPointsAllocator to change entry_points_allocator and thereby change which allocator entry points a thread gets.
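For reference, the variable and its setter in quick_alloc_entrypoints.cc look roughly like the sketch below (there is also a companion entry_points_instrumented flag, handled the same way, which selects the instrumented variants):
/art/runtime/entrypoints/quick/quick_alloc_entrypoints.cc (sketch, not verbatim)
// Which allocator the quick allocation entry points should use.
static gc::AllocatorType entry_points_allocator = gc::kAllocatorTypeDlMalloc;

void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator) {
  entry_points_allocator = allocator;
}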
/art/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
switch (entry_points_allocator) {
#if !defined(__APPLE__) || !defined(__LP64__)
case gc::kAllocatorTypeDlMalloc: {
SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
break;
}
case gc::kAllocatorTypeRosAlloc: {
SetQuickAllocEntryPoints_rosalloc(qpoints, entry_points_instrumented);
break;
}
case gc::kAllocatorTypeBumpPointer: {
CHECK(kMovingCollector);
SetQuickAllocEntryPoints_bump_pointer(qpoints, entry_points_instrumented);
break;
}
case gc::kAllocatorTypeTLAB: {
CHECK(kMovingCollector);
SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
break;
}
#endif
default: {
LOG(FATAL) << "Unimplemented";
}
}
}
SetQuickAllocEntryPointsAllocator is called from ChangeAllocator (which switches the allocator), and ChangeAllocator is in turn called from ChangeCollector (which switches the garbage-collection strategy). Depending on the CollectorType passed to ChangeCollector, a different allocator is chosen.
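Heap::ChangeAllocator is the glue between the two steps: it records the new allocator and pushes it into every thread's entry-point table. A sketch of this version, with locking details omitted:
/art/runtime/gc/heap.cc (sketch, not verbatim)
void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // The large-object space and the non-moving space are never the "current"
    // allocator for ordinary object allocation.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    // Instrumentation walks all threads and calls ResetQuickAllocEntryPoints on each,
    // so the new allocator's entry points become visible to compiled code.
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}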
/art/runtime/gc/heap.cc
void Heap::ChangeCollector(CollectorType collector_type) {
// TODO: Only do this with all mutators suspended to avoid races.
if (collector_type != collector_type_) {
if (collector_type == kCollectorTypeMC) {
// Don't allow mark compact unless support is compiled in.
CHECK(kMarkCompactSupport);
}
collector_type_ = collector_type;
gc_plan_.clear();
switch (collector_type_) {
case kCollectorTypeCC:
// Fall-through.
case kCollectorTypeMC:
// Fall-through.
case kCollectorTypeSS:
// Fall-through.
case kCollectorTypeGSS: {
gc_plan_.push_back(collector::kGcTypeFull);
if (use_tlab_) {
ChangeAllocator(kAllocatorTypeTLAB);
} else {
ChangeAllocator(kAllocatorTypeBumpPointer);
}
break;
}
case kCollectorTypeMS: {
gc_plan_.push_back(collector::kGcTypeSticky);
gc_plan_.push_back(collector::kGcTypePartial);
gc_plan_.push_back(collector::kGcTypeFull);
ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
break;
}
case kCollectorTypeCMS: {
gc_plan_.push_back(collector::kGcTypeSticky);
gc_plan_.push_back(collector::kGcTypePartial);
gc_plan_.push_back(collector::kGcTypeFull);
ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
break;
}
default: {
LOG(FATAL) << "Unimplemented";
}
}
if (IsGcConcurrent()) {
concurrent_start_bytes_ =
std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
} else {
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
}
}
ChangeCollector is called from three places. 1. When the Heap is created, ChangeCollector switches the current collector collector_type_ to desired_collector_type_; desired_collector_type_ equals foreground_collector_type_, both being kCollectorTypeCMS, and since kUseRosAlloc is true the allocator chosen at this point is kAllocatorTypeRosAlloc. 2. When the collector is switched at runtime (a collector transition, described below). 3. In Heap::PreZygoteFork, which switches collector_type_ to foreground_collector_type_, again resulting in kAllocatorTypeRosAlloc.
A collector transition normally happens when the application moves between foreground and background. The foreground state, one the user can perceive, is represented by the enum value kProcessStateJankPerceptible; the background state, which the user cannot directly perceive, is represented by kProcessStateJankImperceptible.
/art/runtime/gc/heap.h
// The process state passed in from the activity manager, used to determine when to do trimming
// and compaction.
enum ProcessState {
kProcessStateJankPerceptible = 0,
kProcessStateJankImperceptible = 1,
};
ActivityThread contains a method that updates the application's process state: updateProcessState. It first checks whether the process state has actually changed; if it has, it decides whether this is a foreground/background change by comparing the new state with ActivityManager.PROCESS_STATE_IMPORTANT_FOREGROUND. A foreground/background change then triggers a collector transition.
/frameworks/base/core/java/android/app/ActivityThread.java
public void updateProcessState(int processState, boolean fromIpc) {
synchronized (this) {
if (mLastProcessState != processState) {
mLastProcessState = processState;
// Update Dalvik state based on ActivityManager.PROCESS_STATE_* constants.
final int DALVIK_PROCESS_STATE_JANK_PERCEPTIBLE = 0;
final int DALVIK_PROCESS_STATE_JANK_IMPERCEPTIBLE = 1;
int dalvikProcessState = DALVIK_PROCESS_STATE_JANK_IMPERCEPTIBLE;
// TODO: Tune this since things like gmail sync are important background but not jank perceptible.
if (processState <= ActivityManager.PROCESS_STATE_IMPORTANT_FOREGROUND) {
dalvikProcessState = DALVIK_PROCESS_STATE_JANK_PERCEPTIBLE;
}
VMRuntime.getRuntime().updateProcessState(dalvikProcessState);
if (false) {
Slog.i(TAG, "******************* PROCESS STATE CHANGED TO: " + processState
+ (fromIpc ? " (from ipc": ""));
}
}
}
}
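On the native side, VMRuntime.updateProcessState reaches Heap::UpdateProcessState through a small JNI bridge along the lines of the sketch below (the function lives in dalvik_system_VMRuntime.cc; the exact body may differ slightly between versions):
/art/runtime/native/dalvik_system_VMRuntime.cc (sketch, not verbatim)
static void VMRuntime_updateProcessState(JNIEnv* env, jobject, jint process_state) {
  // The Java-side constant (0 or 1) maps directly onto gc::ProcessState.
  Runtime* runtime = Runtime::Current();
  runtime->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
}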
When the process switches to the foreground, RequestCollectorTransition is used to request a transition to foreground_collector_type_; when it switches to the background, RequestCollectorTransition requests a transition to background_collector_type_.
/art/runtime/gc/heap.cc
void Heap::UpdateProcessState(ProcessState process_state) {
if (process_state_ != process_state) {
process_state_ = process_state;
for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
// Start at index 1 to avoid "is always false" warning.
// Have iteration 1 always transition the collector.
TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
? foreground_collector_type_ : background_collector_type_);
usleep(kCollectorTransitionStressWait);
}
if (process_state_ == kProcessStateJankPerceptible) {
// Transition back to foreground right away to prevent jank.
RequestCollectorTransition(foreground_collector_type_, 0);
} else {
// Don't delay for debug builds since we may want to stress test the GC.
// If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
// special handling which does a homogenous space compaction once but then doesn't transition
// the collector.
RequestCollectorTransition(background_collector_type_,
kIsDebugBuild ? 0 : kCollectorTransitionWait);
}
}
}
RequestCollectorTransition simply records the requested collector type in desired_collector_type_ and then calls requestHeapTrim of the Java-level Daemons class (via SignalHeapTrimDaemon), which wakes the daemon thread that will later carry out the pending transition or heap trim.
/art/runtime/gc/heap.cc
void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
Thread* self = Thread::Current();
{
MutexLock mu(self, *heap_trim_request_lock_);
if (desired_collector_type_ == desired_collector_type) {
return;
}
heap_transition_or_trim_target_time_ =
std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
desired_collector_type_ = desired_collector_type;
}
SignalHeapTrimDaemon(self);
}
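SignalHeapTrimDaemon is the step that hands control to the Java daemon; it essentially just invokes Daemons.requestHeapTrim() through JNI. Roughly:
/art/runtime/gc/heap.cc (sketch, not verbatim)
void Heap::SignalHeapTrimDaemon(Thread* self) {
  JNIEnv* env = self->GetJniEnv();
  // Daemons.requestHeapTrim() wakes the heap-trim daemon, which calls back into the
  // runtime and eventually runs Heap::DoPendingTransitionOrTrim().
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}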
From the log below we can see that RequestCollectorTransition mostly switches between foreground_collector_type_ and background_collector_type_, updating desired_collector_type_ each time. As described in 《ART虚拟机堆的创建过程》, when the Heap is created foreground_collector_type_ is kCollectorTypeCMS, background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact, and desired_collector_type_ starts out equal to foreground_collector_type_.
$ logcat -v time|grep RequestCollectorTransition
01-01 20:00:15.875 I/art ( 2543): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:15.942 I/art ( 1856): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:16.055 I/art ( 1856): RequestCollectorTransition:CollectorTypeHomogeneousSpaceCompact-->CollectorTypeCMS
01-01 20:00:16.067 I/art ( 1856): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:16.154 I/art ( 2564): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:16.365 I/art ( 2585): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:16.369 I/art ( 1856): RequestCollectorTransition:CollectorTypeHomogeneousSpaceCompact-->CollectorTypeCMS
01-01 20:00:16.511 I/art ( 1856): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:16.608 I/art ( 2616): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:16.694 I/art ( 2636): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:16.839 I/art ( 2655): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:16.972 I/art ( 2679): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:17.153 I/art ( 2704): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:00:17.674 I/art ( 2753): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
01-01 20:01:15.182 I/art ( 2801): RequestCollectorTransition:CollectorTypeCMS-->CollectorTypeHomogeneousSpaceCompact
Eventually Heap::DoPendingTransitionOrTrim gets called.
If the request is a transition to background_collector_type_, the heap-trim daemon thread first waits about 5 seconds (kCollectorTransitionWait), during which the heap keeps using foreground_collector_type_. For a process that has gone to the background we do not care about pause times, so CareAboutPauseTimes returns false; after the wait a homogeneous space compaction is performed via PerformHomogeneousSpaceCompact and the function returns, with no additional heap trim needed.
If the request is a transition to foreground_collector_type_, TransitionCollector is called to switch the collector, and afterwards Heap::Trim() trims the heap.
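CareAboutPauseTimes, used below, just reports whether the process is currently in the jank-perceptible (foreground) state. Roughly (defined inline in the Heap class in this version):
/art/runtime/gc/heap.h (sketch, not verbatim)
// Pause times only matter while the user can perceive jank, i.e. in the foreground.
bool CareAboutPauseTimes() const {
  return process_state_ == kProcessStateJankPerceptible;
}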
/art/runtime/gc/heap.cc
void Heap::DoPendingTransitionOrTrim() {
Thread* self = Thread::Current();
CollectorType desired_collector_type;
// Wait until we reach the desired transition time.
while (true) {
uint64_t wait_time;
{
MutexLock mu(self, *heap_trim_request_lock_);
desired_collector_type = desired_collector_type_;
uint64_t current_time = NanoTime();
if (current_time >= heap_transition_or_trim_target_time_) {
break;
}
wait_time = heap_transition_or_trim_target_time_ - current_time;
}
ScopedThreadStateChange tsc(self, kSleeping);
usleep(wait_time / 1000);
// Usleep takes microseconds.
}
// Launch homogeneous space compaction if it is desired.
if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
if (!CareAboutPauseTimes()) {
PerformHomogeneousSpaceCompact();
}
// No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
desired_collector_type = collector_type_;
return;
}
// Transition the collector if the desired collector type is not the same as the current
// collector type.
TransitionCollector(desired_collector_type);
if (!CareAboutPauseTimes()) {
// Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
// about pauses.
Runtime* runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll();
uint64_t start_time = NanoTime();
size_t count = runtime->GetMonitorList()->DeflateMonitors();
VLOG(heap) << "Deflating " << count << " monitors took "
<< PrettyDuration(NanoTime() - start_time);
runtime->GetThreadList()->ResumeAll();
}
// Do a heap trim if it is needed.
Trim();
}
Inside the for(;;) loop of TransitionCollector (shown below), the code first waits for any GC that is currently running to finish, and then breaks out of the loop once either no copying transition is needed (the old and the new collector type are both moving or both non-moving) or moving GC has not been disabled (disable_moving_gc_count_ == 0).
It then uses ThreadList::SuspendAll to suspend all threads other than the current one. In the foreground-transition scenario, the argument passed to TransitionCollector is kCollectorTypeCMS and the current collector collector_type_ is not a moving GC, so the body of the "case kCollectorTypeCMS:" branch does nothing and execution goes straight on to ChangeCollector. Finally the suspended threads are resumed and some bookkeeping is done.
In ChangeCollector, three GC types are pushed onto gc_plan_: kGcTypeSticky, kGcTypePartial and kGcTypeFull, and the allocator is set to kAllocatorTypeRosAlloc.
So heap creation, Zygote forking a new process, and an application returning from background to foreground all end up calling ChangeCollector with kCollectorTypeCMS, which is why entry_points_allocator is kAllocatorTypeRosAlloc most of the time.
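For reference, IsMovingGc, used both in the prose above and in the code below, classifies collector types roughly as follows (a sketch; the exact list depends on the ART version):
/art/runtime/gc/heap.cc (sketch, not verbatim)
// "Moving" collectors relocate objects, so switching between a moving and a
// non-moving collector requires copying the heap contents (a copying transition).
static bool IsMovingGc(CollectorType collector_type) {
  return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
         collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC ||
         collector_type == kCollectorTypeHomogeneousSpaceCompact;
}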
/art/runtime/gc/heap.cc
void Heap::TransitionCollector(CollectorType collector_type) {
if (collector_type == collector_type_) {
return;
}
VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
<< " -> " << static_cast<int>(collector_type);
uint64_t start_time = NanoTime();
uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Runtime* const runtime = Runtime::Current();
ThreadList* const tl = runtime->GetThreadList();
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Locks::mutator_lock_->AssertNotHeld(self);
// Busy wait until we can GC (StartGC can fail if we have a non-zero
// compacting_gc_disable_count_, this should rarely occurs).
for (;;) {
{
ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
// Currently we only need a heap transition if we switch from a moving collector to a
// non-moving one, or visa versa.
const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
// If someone else beat us to it and changed the collector before we could, exit.
// This is safe to do before the suspend all since we set the collector_type_running_ before
// we exit the loop. If another thread attempts to do the heap transition before we exit,
// then it would get blocked on WaitForGcToCompleteLocked.
if (collector_type == collector_type_) {
return;
}
// GC can be disabled if someone has a used GetPrimitiveArrayCritical but not yet released.
if (!copying_transition || disable_moving_gc_count_ == 0) {
// TODO: Not hard code in semi-space collector?
collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
break;
}
}
usleep(1000);
}
if (runtime->IsShuttingDown(self)) {
// Don't allow heap transitions to happen if the runtime is shutting down since these can
// cause objects to get finalized.
FinishGC(self, collector::kGcTypeNone);
return;
}
tl->SuspendAll();
switch (collector_type) {
case kCollectorTypeSS: {
if (!IsMovingGc(collector_type_)) {
// Create the bump pointer space from the backup space.
CHECK(main_space_backup_ != nullptr);
std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
// We are transitioning from non moving GC -> moving GC, since we copied from the bump
// pointer space last transition it will be protected.
CHECK(mem_map != nullptr);
mem_map->Protect(PROT_READ | PROT_WRITE);
bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
mem_map.release());
AddSpace(bump_pointer_space_);
Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
// Use the now empty main space mem map for the bump pointer temp space.
mem_map.reset(main_space_->ReleaseMemMap());
// Unset the pointers just in case.
if (dlmalloc_space_ == main_space_) {
dlmalloc_space_ = nullptr;
} else if (rosalloc_space_ == main_space_) {
rosalloc_space_ = nullptr;
}
// Remove the main space so that we don't try to trim it, this doens't work for debug
// builds since RosAlloc attempts to read the magic number from a protected page.
RemoveSpace(main_space_);
RemoveRememberedSet(main_space_);
delete main_space_;
// Delete the space since it has been removed.
main_space_ = nullptr;
RemoveRememberedSet(main_space_backup_.get());
main_space_backup_.reset(nullptr);
// Deletes the space.
temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
mem_map.release());
AddSpace(temp_space_);
}
break;
}
case kCollectorTypeMS:
// Fall through.
case kCollectorTypeCMS: {
if (IsMovingGc(collector_type_)) {
CHECK(temp_space_ != nullptr);
std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
RemoveSpace(temp_space_);
temp_space_ = nullptr;
mem_map->Protect(PROT_READ | PROT_WRITE);
CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize,
std::min(mem_map->Size(), growth_limit_), mem_map->Size());
mem_map.release();
// Compact to the main space from the bump pointer space, don't need to swap semispaces.
AddSpace(main_space_);
Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
mem_map.reset(bump_pointer_space_->ReleaseMemMap());
RemoveSpace(bump_pointer_space_);
bump_pointer_space_ = nullptr;
const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
// Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
if (kIsDebugBuild && kUseRosAlloc) {
mem_map->Protect(PROT_READ | PROT_WRITE);
}
main_space_backup_.reset(CreateMallocSpaceFromMemMap(
mem_map.get(), kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
mem_map->Size(), name, true));
if (kIsDebugBuild && kUseRosAlloc) {
mem_map->Protect(PROT_NONE);
}
mem_map.release();
}
break;
}
default: {
LOG(FATAL) << "Attempted to transition to invalid collector type "
<< static_cast<size_t>(collector_type);
break;
}
}
ChangeCollector(collector_type);
tl->ResumeAll();
// Can't call into java code with all threads suspended.
reference_processor_.EnqueueClearedReferences(self);
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
int32_t delta_allocated = before_allocated - after_allocated;
std::string saved_str;
if (delta_allocated >= 0) {
saved_str = " saved at least " + PrettySize(delta_allocated);
} else {
saved_str = " expanded " + PrettySize(-delta_allocated);
}
VLOG(heap) << "Heap transition to " << process_state_ << " took "
<< PrettyDuration(duration) << saved_str;
}
Now back to the initialization of the allocation entry points.
/art/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
case gc::kAllocatorTypeRosAlloc: {
SetQuickAllocEntryPoints_rosalloc(qpoints, entry_points_instrumented);
break;
}
The GENERATE_ENTRYPOINTS macro declares a set of assembly entry functions and assigns them to the allocation entry points. When an object has to be allocated, the appropriate assembly entry is chosen according to what is being allocated (object or array, class already resolved or not, with or without access checks) and whether instrumentation is enabled.
/art/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
#define GENERATE_ENTRYPOINTS(suffix) \
extern "C" void* art_quick_alloc_array##suffix(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_array_resolved##suffix(void* klass, void*, int32_t); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, void* method); \
extern "C" void* art_quick_alloc_object_resolved##suffix(void* klass, void* method); \
extern "C" void* art_quick_alloc_object_initialized##suffix(void* klass, void* method); \
extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, void* method); \
extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, void*, int32_t); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(void* klass, void*, int32_t); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, void* method); \
extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(void* klass, void* method); \
extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(void* klass, void* method); \
extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, void* method); \
extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrumented) { \
  if (instrumented) { \
    qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
    qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix##_instrumented; \
    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \
    qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \
    qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \
    qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \
    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
  } else { \
    qpoints->pAllocArray = art_quick_alloc_array##suffix; \
    qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \
    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \
    qpoints->pAllocObject = art_quick_alloc_object##suffix; \
    qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \
    qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \
    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
  } \
}
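In the same source file this macro is expanded once per allocator, which is what produces the SetQuickAllocEntryPoints_dlmalloc / _rosalloc functions called by ResetQuickAllocEntryPoints above, as well as entry points such as art_quick_alloc_object_rosalloc. A sketch of the instantiations:
/art/runtime/entrypoints/quick/quick_alloc_entrypoints.cc (sketch, not verbatim)
// One expansion per allocator type; the suffix becomes part of every generated name.
GENERATE_ENTRYPOINTS(_dlmalloc)
GENERATE_ENTRYPOINTS(_rosalloc)
GENERATE_ENTRYPOINTS(_bump_pointer)
GENERATE_ENTRYPOINTS(_tlab)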
Let us pick a simple one, pAllocObject, and look at its assembly implementation. art_quick_alloc_object_rosalloc ends up using a bl instruction to jump into the C++ function artAllocObjectFromCodeRosAlloc. The parameter type_idx, which identifies the type of object to allocate, is passed in register r0, and the parameter method, which identifies the calling method, is passed in r1.
/art/runtime/arch/quick_alloc_entrypoints.S
.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
// Called by managed code to allocate an object.
TWO_ARG_DOWNCALL art_quick_alloc_object\c_suffix, artAllocObjectFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
// Called by managed code to allocate an object of a resolved class.
/art/runtime/arch/arm/quick_entrypoints_arm.S
// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
    mov    r2, r9                     @ pass Thread::Current
    mov    r3, sp                     @ pass SP
    bl     \entrypoint                @ (uint32_t type_idx, Method* method, Thread*, SP)
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    \return
    DELIVER_PENDING_EXCEPTION
END \name
.endm
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR generates the code of artAllocObjectFromCodeRosAlloc (and of its siblings).
/art/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
    uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
    StackReference<mirror::ArtMethod>* sp) \
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
  return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
}
/art/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(suffix, allocator_type) \
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, Instrumented, true, allocator_type) \
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, , false, allocator_type)
CheckObjectAlloc takes type_idx and method and checks whether the corresponding DexCache has already resolved this class. If not, the class is resolved with ResolveType; in addition, the class must be initialized before an instance can be allocated. If the class had to be resolved through ResolveType (or had to be initialized first), slow_path is set to true; if a resolved, initialized entry was found in the DexCache, slow_path stays false. CheckObjectAlloc returns the Class of the object to be allocated.
When slow_path is true, the allocator used is the Heap's current allocator; when slow_path is false, the allocator passed in as a parameter is used.
/art/runtime/entrypoints/entrypoint_utils-inl.h
template <bool kAccessCheck, bool kInstrumented>
static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
bool slow_path = false;
mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
if (UNLIKELY(slow_path)) {
if (klass == nullptr) {
return nullptr;
}
return klass->Alloc<kInstrumented>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
DCHECK(klass != nullptr);
return klass->Alloc<kInstrumented>(self, allocator_type);
}
add_finalizer indicates whether the class overrides finalize(). If it does, Heap::AddFinalizerReference calls into Java code and creates a FinalizerReference with the newly allocated Object as its referent; all FinalizerReferences are linked into a list. When the GC wants to reclaim such an Object, it first runs the object's finalize() method and only afterwards decides whether the object can actually be reclaimed.
/art/runtime/mirror/class-inl.h
template<bool kIsInstrumented, bool kCheckAddFinalizer>
inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
CheckObjectAlloc();
gc::Heap* heap = Runtime::Current()->GetHeap();
const bool add_finalizer = kCheckAddFinalizer && IsFinalizable();
if (!kCheckAddFinalizer) {
DCHECK(!IsFinalizable());
}
mirror::Object* obj =
heap->AllocObjectWithAllocator<kIsInstrumented, false>(self, this, this->object_size_,
allocator_type, VoidFunctor());
if (add_finalizer && LIKELY(obj != nullptr)) {
heap->AddFinalizerReference(self, &obj);
}
return obj;
}
Now assume that the allocator type passed to AllocObjectWithAllocator is kAllocatorTypeRosAlloc.
The function first calls TryToAllocate. If that fails, it tries to GC and then allocate again, which is what AllocateInternalWithGc does. If AllocateInternalWithGc also fails and, in the meantime, the heap's current allocator has changed away from the allocator passed in (and no exception is pending), Heap::AllocObject is called, which re-enters AllocObjectWithAllocator. This second call differs from the first one: the first call used the template parameter kCheckLargeObject = false, while the second uses kCheckLargeObject = true and passes the heap's current allocator type as the allocator argument. In other words, the second call may take the large-object path. ShouldAllocLargeObject checks whether the requested size is at least 12 KB and the class is a primitive array; if so, AllocLargeObject allocates the object from the large object space, with allocator type kAllocatorTypeLOS. If that fails, the pending OOM exception is cleared and the normal AllocObjectWithAllocator flow continues, until the allocation either succeeds or an OutOfMemoryError is thrown.
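The size/type check mentioned above is ShouldAllocLargeObject. A condensed sketch, assuming the default large-object threshold of three pages (12 KB), which matches the value quoted in the text; not verbatim ART code:
/art/runtime/gc/heap-inl.h (sketch, not verbatim)
inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // Only big primitive arrays go to the large object space; large non-array
  // objects stay in the main space so that card table assumptions keep holding.
  return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
}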
/art/runtime/gc/heap-inl.h
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
size_t byte_count, AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor) {
if (kIsDebugBuild) {
CheckPreconditionsForAllocObject(klass, byte_count);
// Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
// done in the runnable state where suspension is expected.
CHECK_EQ(self->GetState(), kRunnable);
self->AssertThreadSuspensionIsAllowable();
}
// Need to check that we arent the large object allocator since the large object allocation code
// path this function. If we didn't check we would have an infinite loop.
mirror::Object* obj;
if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
pre_fence_visitor);
if (obj != nullptr) {
return obj;
} else {
// There should be an OOM exception, since we are retrying, clear it.
self->ClearException();
}
// If the large object allocation failed, try to use the normal spaces (main space,
// non moving space). This can happen if there is significant virtual address space
// fragmentation.
}
AllocationTimer alloc_timer(this, &obj);
size_t bytes_allocated;
size_t usable_size;
size_t new_num_bytes_allocated = 0;
if (allocator == kAllocatorTypeTLAB) {
byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
}
// If we have a thread local allocation we don't need to update bytes allocated.
if (allocator == kAllocatorTypeTLAB && byte_count <= self->TlabSize()) {
obj = self->AllocTlab(byte_count);
DCHECK(obj != nullptr) << "AllocTlab can't fail";
obj->SetClass(klass);
if (kUseBakerOrBrooksReadBarrier) {
if (kUseBrooksReadBarrier) {
obj->SetReadBarrierPointer(obj);
}
obj->AssertReadBarrierPointer();
}
bytes_allocated = byte_count;
usable_size = bytes_allocated;
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
} else {
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
&usable_size);
if (UNLIKELY(obj == nullptr)) {
bool is_current_allocator = allocator == GetCurrentAllocator();
obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
&klass);
if (obj == nullptr) {
bool after_is_current_allocator = allocator == GetCurrentAllocator();
// If there is a pending exception, fail the allocation right away since the next one
// could cause OOM and abort the runtime.
if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
// If the allocator changed, we need to restart the allocation.
return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
}
return nullptr;
}
}
DCHECK_GT(bytes_allocated, 0u);
DCHECK_GT(usable_size, 0u);
obj->SetClass(klass);
if (kUseBakerOrBrooksReadBarrier) {
if (kUseBrooksReadBarrier) {
obj->SetReadBarrierPointer(obj);
}
obj->AssertReadBarrierPointer();
}
if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
// (Note this if statement will be constant folded away for the
// fast-path quick entry points.) Because SetClass() has no write
// barrier, if a non-moving space allocation, we need a write
// barrier as the class pointer may point to the bump pointer
// space (where the class pointer is an "old-to-young" reference,
// though rare) under the GSS collector with the remembered set
// enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
// cases because we don't directly allocate into the main alloc
// space (besides promotions) under the SS/GSS collector.
WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
}
pre_fence_visitor(obj, usable_size);
new_num_bytes_allocated =
static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated))
+ bytes_allocated;
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
CHECK_LE(obj->SizeOf(), usable_size);
}
// TODO: Deprecate.
if (kInstrumented) {
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = self->GetStats();
++thread_stats->allocated_objects;
thread_stats->allocated_bytes += bytes_allocated;
RuntimeStats* global_stats = Runtime::Current()->GetStats();
++global_stats->allocated_objects;
global_stats->allocated_bytes += bytes_allocated;
}
} else {
DCHECK(!Runtime::Current()->HasStatsEnabled());
}
if (AllocatorHasAllocationStack(allocator)) {
PushOnAllocationStack(self, &obj);
}
if (kInstrumented) {
if (Dbg::IsAllocTrackingEnabled()) {
Dbg::RecordAllocation(klass, bytes_allocated);
}
} else {
DCHECK(!Dbg::IsAllocTrackingEnabled());
}
// IsConcurrentGc() isn't known at compile time so we can optimize by not checking it for
// the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
// optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
// the allocator_type should be constant propagated.
if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
}
VerifyObject(obj);
self->VerifyStack();
return obj;
}
// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;
inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
if (kUseThreadLocalAllocationStack) {
if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
}
} else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
PushOnAllocationStackWithInternalGC(self, obj);
}
}
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor) {
// Save and restore the class in case it moves.
StackHandleScope<1> hs(self);
auto klass_wrapper = hs.NewHandleWrapper(klass);
return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
kAllocatorTypeLOS,
pre_fence_visitor);
}
As the code shows, for kAllocatorTypeRosAlloc Heap::TryToAllocate allocates from rosalloc_space_ by calling AllocNonvirtual. How RosAlloc actually carves up the memory is a topic for another article.
/art/runtime/gc/heap-inl.h
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
size_t alloc_size, size_t* bytes_allocated,
size_t* usable_size) {
if (allocator_type != kAllocatorTypeTLAB &&
UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
return nullptr;
}
mirror::Object* ret;
switch (allocator_type) {
...
case kAllocatorTypeRosAlloc: {
if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
// If running on valgrind, we should be using the instrumented path.
ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
} else {
DCHECK(!running_on_valgrind_);
ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
}
break;
}
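The footprint check at the top of TryToAllocate, IsOutOfMemoryOnAllocation<kGrow>, roughly behaves as sketched below (condensed, concurrent-GC handling omitted, not verbatim): the allocation is refused once it would push the heap past its soft limit, unless kGrow is true, in which case the soft limit is raised instead.
/art/runtime/gc/heap-inl.h (sketch, not verbatim)
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;  // Hard limit reached: the caller will see this as OOM.
    }
    // (The real code also consults allocator_type here to decide whether a concurrent
    // GC can be relied on instead of growing; that logic is omitted in this sketch.)
    if (!kGrow) {
      return true;  // Not allowed to grow: fail so that a GC can be tried first.
    }
    // kGrow == true: raise the soft limit so this allocation can proceed.
    max_allowed_footprint_ = new_footprint;
  }
  return false;
}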
AllocateInternalWithGc first waits for any GC that is already running to finish. Once that GC has completed, it calls TryToAllocate again, since the GC that just finished may have freed some space. If that fails, it runs a GC with GcType next_gc_type_ and GcCause kGcCauseForAlloc, and tries to allocate again once the GC is done. If that still fails, it walks gc_plan_ and, for each GcType in it, from the lightest to the heaviest, runs a GC with cause kGcCauseForAlloc followed by another allocation attempt; heavier GCs cause longer pauses for the application, which is why they are tried progressively.
If allocation has still failed after every GcType has been tried, one more TryToAllocate is made, this time with the template parameter kGrow set to true. kGrow is only consulted for allocators other than kAllocatorTypeTLAB, because TryToAllocate skips the footprint check entirely for TLAB allocations. Normally, when the total allocated memory would exceed the max_allowed_footprint_ threshold, TryToAllocate returns null; with kGrow true, max_allowed_footprint_ is instead raised to the sum of the currently allocated bytes and the size of this allocation.
If that still fails, a GC of the heaviest kind (GcType kGcTypeFull) is run, this time allowing the heap to grow and clearing SoftReferences, and allocation is attempted once more. If it still fails, then for the kAllocatorTypeRosAlloc and kAllocatorTypeDlMalloc allocators a homogeneous space compaction is performed via PerformHomogeneousSpaceCompact, followed by one more allocation attempt. For kAllocatorTypeNonMoving, the likely cause is that the heap as a whole is not full but non_moving_space_ is; in that case moving GC is disabled, the main space is turned into a non-moving space, and TryToAllocate is called again.
If even that fails, the only thing left to do is throw an OutOfMemoryError.
/art/runtime/gc/heap.cc
mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
size_t alloc_size, size_t* bytes_allocated,
size_t* usable_size,
mirror::Class** klass) {
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
self->AssertNoPendingException();
DCHECK(klass != nullptr);
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
klass = nullptr;
// Invalidate for safety.
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
if (last_gc != collector::kGcTypeNone) {
// If we were the default allocator but the allocator changed while we were suspended,
// abort the allocation.
if (was_default_allocator && allocator != GetCurrentAllocator()) {
return nullptr;
}
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
usable_size);
if (ptr != nullptr) {
return ptr;
}
}
collector::GcType tried_type = next_gc_type_;
const bool gc_ran =
CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
return nullptr;
}
if (gc_ran) {
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
usable_size);
if (ptr != nullptr) {
return ptr;
}
}
// Loop through our different Gc types and try to Gc until we get enough free memory.
for (collector::GcType gc_type : gc_plan_) {
if (gc_type == tried_type) {
continue;
}
// Attempt to run the collector, if we succeed, re-try the allocation.
const bool gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
return nullptr;
}
if (gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
usable_size);
if (ptr != nullptr) {
return ptr;
}
}
}
// Allocations have failed after GCs; this is an exceptional state.
// Try harder, growing the heap if necessary.
mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
usable_size);
if (ptr != nullptr) {
return ptr;
}
// Most allocations should have succeeded by now, so the heap is really full, really fragmented,
// or the requested size is really big. Do another GC, collecting SoftReferences this time. The
// VM spec requires that all SoftReferences have been collected and cleared before throwing
// OOME.
VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
<< " allocation";
// TODO: Run finalization, but this may cause more allocations to occur.
// We don't need a WaitForGcToComplete here either.
DCHECK(!gc_plan_.empty());
CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
if (was_default_allocator && allocator != GetCurrentAllocator()) {
return nullptr;
}
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
if (ptr == nullptr) {
const uint64_t current_time = NanoTime();
switch (allocator) {
case kAllocatorTypeRosAlloc:
// Fall-through.
case kAllocatorTypeDlMalloc: {
if (use_homogeneous_space_compaction_for_oom_ &&
current_time - last_time_homogeneous_space_compaction_by_oom_ >
min_interval_homogeneous_space_compaction_by_oom_) {
last_time_homogeneous_space_compaction_by_oom_ = current_time;
HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
switch (result) {
case HomogeneousSpaceCompactResult::kSuccess:
// If the allocation succeeded, we delayed an oom.
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
usable_size);
if (ptr != nullptr) {
count_delayed_oom_++;
}
break;
case HomogeneousSpaceCompactResult::kErrorReject:
// Reject due to disabled moving GC.
break;
case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
// Throw OOM by default.
break;
default: {
LOG(FATAL) << "Unimplemented homogeneous space compaction result "
<< static_cast<size_t>(result);
}
}
// Always print that we ran homogeneous space compation since this can cause jank.
VLOG(heap) << "Ran heap homogeneous space compaction, "
<< " requested defragmentation "
<< count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
<< " performed defragmentation "
<< count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
<< " ignored homogeneous space compaction "
<< count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
<< " delayed count = "
<< count_delayed_oom_.LoadSequentiallyConsistent();
}
break;
}
case kAllocatorTypeNonMoving: {
// Try to transition the heap if the allocation failure was due to the space being full.
if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
// If we aren't out of memory then the OOM was probably from the non moving space being
// full. Attempt to disable compaction and turn the main space into a non moving space.
DisableMovingGc();
// If we are still a moving GC then something must have caused the transition to fail.
if (IsMovingGc(collector_type_)) {
MutexLock mu(self, *gc_complete_lock_);
// If we couldn't disable moving GC, just throw OOME and return null.
LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
<< disable_moving_gc_count_;
} else {
LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
usable_size);
}
}
break;
}
default: {
// Do nothing for others allocators.
}
}
}
// If the allocation hasn't succeeded by this point, throw an OOM error.
if (ptr == nullptr) {
ThrowOutOfMemoryError(self, alloc_size, allocator);
}
return ptr;
}
Putting this together with 《ART类和方法》: when Java code creates a new object, the class is first located through findClass; during findClass, AllocClass allocates the space for the class itself, and LoadClassMembers allocates the space for the ArtField and ArtMethod objects that back the class's fields and methods inside ART.
Once findClass has finished, the allocation entry points (for example pAllocObject) are used to allocate memory for the instance itself; the instance size comes from the Class's object_size_ member. Only after the instance memory has been allocated does the class's constructor run.