How the ART VM's GC Clears SoftReference (Soft References): A Source Code Analysis

Introduction

An object held only through a SoftReference is not reclaimed by the GC unless the VM is about to run out of memory (OutOfMemoryError). Below we walk through the Android 8.0 source to see exactly how and under what conditions soft references are cleared; general GC principles are out of scope here.
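As a quick Java-level illustration of that behavior, here is a minimal sketch (illustrative only; the class name and buffer sizes are arbitrary and not part of this article's test app) that holds a buffer only through a SoftReference and applies memory pressure until the referent is cleared or an OutOfMemoryError is thrown:

import java.lang.ref.SoftReference;
import java.util.ArrayList;
import java.util.List;

public class SoftReferenceDemo {
    public static void main(String[] args) {
        // A 1 MB buffer reachable only through a SoftReference.
        SoftReference<byte[]> cache = new SoftReference<>(new byte[1024 * 1024]);

        List<byte[]> pressure = new ArrayList<>();
        try {
            // Keep allocating strongly reachable memory until the soft referent disappears.
            while (cache.get() != null) {
                pressure.add(new byte[1024 * 1024]);
            }
            System.out.println("Soft referent was cleared before an OOME was thrown");
        } catch (OutOfMemoryError e) {
            pressure.clear();
            // The VM spec requires soft references to be cleared before an OOME is thrown.
            System.out.println("OOME thrown; referent cleared: " + (cache.get() == null));
        }
    }
}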

Triggering a GC with cause kGcCauseForAlloc (a GC caused by insufficient memory)

Write a loop in an Activity that creates a large number of Java objects; this will eventually trigger a kGcCauseForAlloc GC.

The corresponding Java code:

import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.widget.TextView;

import java.lang.ref.SoftReference;
import java.util.ArrayList;
import java.util.List;

public class MainActivity extends AppCompatActivity {
    // Each list is reachable only through a SoftReference.
    List<SoftReference<List<String>>> softReferenceList = new ArrayList<>();

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        TextView contentText = (TextView) findViewById(R.id.sample_text);
        contentText.setText("Allocate");
        contentText.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                // Each click allocates 10,000 ArrayLists with an initial capacity of 10240,
                // quickly building up memory pressure.
                for (int i = 0; i < 10000; i++) {
                    softReferenceList.add(new SoftReference<List<String>>(new ArrayList<String>(10240)));
                }
            }
        });
    }
}

Click the "Allocate" button to generate large numbers of Java objects. After the app runs for a while, the breakpoint set inside ART is eventually hit.

Call stack at the ART GC breakpoint

art::mirror::Object::SetFieldObjectWithoutWriteBarrier<false, true, (art::VerifyObjectFlags)0, true> object-inl.h:771
art::mirror::Object::SetFieldObject<false, true, (art::VerifyObjectFlags)0, true> object-inl.h:781
art::mirror::Object::SetFieldObjectVolatile<false, true, (art::VerifyObjectFlags)0> object-inl.h:792
art::mirror::Reference::ClearReferent<false> reference.h:75
art::gc::ReferenceQueue::ClearWhiteReferences reference_queue.cc:144
art::gc::ReferenceProcessor::ProcessReferences reference_processor.cc:168
art::gc::collector::ConcurrentCopying::ProcessReferences concurrent_copying.cc:2648
art::gc::collector::ConcurrentCopying::MarkingPhase concurrent_copying.cc:888
art::gc::collector::ConcurrentCopying::RunPhases concurrent_copying.cc:179
art::gc::collector::GarbageCollector::Run garbage_collector.cc:96
art::gc::Heap::CollectGarbageInternal heap.cc:2607
art::gc::Heap::AllocateInternalWithGc heap.cc:1658 (this frame corresponds to the final CollectGarbageInternal call in the source below, the one passing clear_soft_references as true)
art::gc::Heap::AllocObjectWithAllocator<false, true, art::mirror::SetLengthVisitor> heap-inl.h:116
art::mirror::Array::Alloc<false, false> array-inl.h:183
art::AllocArrayFromCodeResolved<false> entrypoint_utils-inl.h:313
artAllocArrayFromCodeResolvedRegionTLAB quick_alloc_entrypoints.cc:128
art_quick_alloc_array_resolved32_region_tlab quick_entrypoints_x86.S:1264
...

Analyzing the call stack

Run a GC, then retry the allocation

art/runtime/gc/heap.cc

// By the time AllocObjectWithAllocator calls this function, TryToAllocate has already been tried
// and failed. This function retries TryToAllocate after running a GC.
mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
                                             AllocatorType allocator,
                                             bool instrumented,
                                             size_t alloc_size,
                                             size_t* bytes_allocated,
                                             size_t* usable_size,
                                             size_t* bytes_tl_bulk_allocated,
                                             ObjPtr<mirror::Class>* klass) {
  bool was_default_allocator = allocator == GetCurrentAllocator();
  // Make sure there is no pending exception since we may need to throw an OOME.
  self->AssertNoPendingException();
  DCHECK(klass != nullptr);
  StackHandleScope<1> hs(self);
  HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
  // If we were the default allocator but the allocator changed while we were suspended,
  // abort the allocation.
  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
      (!instrumented && EntrypointsInstrumented())) {
    return nullptr;
  }
  if (last_gc != collector::kGcTypeNone) {
    // A GC just finished while we waited; try the allocation first and skip another GC if it succeeds.
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                     usable_size, bytes_tl_bulk_allocated);
    if (ptr != nullptr) {
      return ptr;
    }
  }

  collector::GcType tried_type = next_gc_type_;
  const bool gc_ran =
      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
      (!instrumented && EntrypointsInstrumented())) {
    return nullptr;
  }
  if (gc_ran) {  // The requested GC actually ran.
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                     usable_size, bytes_tl_bulk_allocated);
    if (ptr != nullptr) {
      return ptr;
    }
  }

  // Try each GC type other than tried_type in turn until enough memory is freed or the plan list is exhausted.
  // Loop through our different Gc types and try to Gc until we get enough free memory.
  for (collector::GcType gc_type : gc_plan_) {
    if (gc_type == tried_type) {
      continue;
    }
    // Attempt to run the collector, if we succeed, re-try the allocation.
    const bool plan_gc_ran =
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
    if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
        (!instrumented && EntrypointsInstrumented())) {
      return nullptr;
    }
    if (plan_gc_ran) {  // This GC type ran.
      // Did we free sufficient memory for the allocation to succeed?
      // Retry the allocation.
      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                       usable_size, bytes_tl_bulk_allocated);
      if (ptr != nullptr) {
        return ptr;
      }
    }
  }
  // Allocations have failed after GCs;  this is an exceptional state.
  // Try harder, growing the heap if necessary.
  // Try to grow the heap; the switch is kGrow, the second template argument of TryToAllocate<true, /*kGrow*/ true>.
  mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
                                                  usable_size, bytes_tl_bulk_allocated);
  if (ptr != nullptr) {
    return ptr;
  }
  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time.
  // The VM spec requires that all SoftReferences have been collected and cleared before throwing an OOME.
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";
  // TODO: Run finalization, but this may cause more allocations to occur.
  // We don't need a WaitForGcToComplete here either.
  DCHECK(!gc_plan_.empty());
  // Request a GC that clears all SoftReferences; the switch is the clear_soft_references argument, which clears them all when true.
  CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, /*clear_soft_references*/true);
  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
      (!instrumented && EntrypointsInstrumented())) {
    return nullptr;
  }
  ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
                                  bytes_tl_bulk_allocated);
  if (ptr == nullptr) {
    const uint64_t current_time = NanoTime();
    switch (allocator) {
      case kAllocatorTypeRosAlloc:
        // Fall-through.
      case kAllocatorTypeDlMalloc: {
        if (use_homogeneous_space_compaction_for_oom_ &&
            current_time - last_time_homogeneous_space_compaction_by_oom_ >
            min_interval_homogeneous_space_compaction_by_oom_) {
          last_time_homogeneous_space_compaction_by_oom_ = current_time;
          HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
          // Thread suspension could have occurred.
          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
              (!instrumented && EntrypointsInstrumented())) {
            return nullptr;
          }
          switch (result) {
            case HomogeneousSpaceCompactResult::kSuccess:
              // If the allocation succeeded, we delayed an oom.
              ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
                                              usable_size, bytes_tl_bulk_allocated);
              if (ptr != nullptr) {
                count_delayed_oom_++;
              }
              break;
            case HomogeneousSpaceCompactResult::kErrorReject:
              // Reject due to disabled moving GC.
              break;
            case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
              // Throw OOM by default.
              break;
            default: {
              UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
                  << static_cast<size_t>(result);
              UNREACHABLE();
            }
          }
          // Always print that we ran homogeneous space compation since this can cause jank.
          VLOG(heap) << "Ran heap homogeneous space compaction, "
                    << " requested defragmentation "
                    << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
                    << " performed defragmentation "
                    << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
                    << " ignored homogeneous space compaction "
                    << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
                    << " delayed count = "
                    << count_delayed_oom_.LoadSequentiallyConsistent();
        }
        break;
      }
      case kAllocatorTypeNonMoving: {
        if (kUseReadBarrier) {
          // DisableMovingGc() isn't compatible with CC.
          break;
        }
        // Try to transition the heap if the allocation failure was due to the space being full.
        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
          // If we aren't out of memory then the OOM was probably from the non moving space being
          // full. Attempt to disable compaction and turn the main space into a non moving space.
          DisableMovingGc();
          // Thread suspension could have occurred.
          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
              (!instrumented && EntrypointsInstrumented())) {
            return nullptr;
          }
          // If we are still a moving GC then something must have caused the transition to fail.
          if (IsMovingGc(collector_type_)) {
            MutexLock mu(self, *gc_complete_lock_);
            // If we couldn't disable moving GC, just throw OOME and return null.
            LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
                         << disable_moving_gc_count_;
          } else {
            LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
            ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
                                            usable_size, bytes_tl_bulk_allocated);
          }
        }
        break;
      }
      default: {
        // Do nothing for others allocators.
      }
    }
  }
  // If the allocation hasn't succeeded by this point, throw an OOM error.
  if (ptr == nullptr) {
    // Throw the OutOfMemoryError.
    ThrowOutOfMemoryError(self, alloc_size, allocator);
  }
  return ptr;
}

When the allocation still fails after all of these GCs, the function tries to grow the heap. If the allocation fails even then, it runs one last GC that clears SoftReferences.
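The overall order of attempts can be summarized with the sketch below. This is not ART code: every method name is a hypothetical stand-in for the native logic above, and the homogeneous-space-compaction and non-moving-space fallbacks that run just before the OOME are omitted.

// Rough Java-style sketch of the attempt order in Heap::AllocateInternalWithGc (hypothetical stubs).
final class AllocationSlowPathSketch {
    Object allocate(long size) {
        Object p;
        // 1. A GC may already have been running: after waiting for it, retry without growing the heap.
        if ((p = tryToAllocate(size, /* grow= */ false)) != null) return p;
        // 2. Run the next planned GC type and retry.
        if (runGc(/* clearSoftReferences= */ false) && (p = tryToAllocate(size, false)) != null) return p;
        // 3. Try the remaining GC types in gc_plan_, retrying the allocation after each one.
        for (int i = 0; i < remainingGcPlanSize(); i++) {
            if (runGc(false) && (p = tryToAllocate(size, false)) != null) return p;
        }
        // 4. Allow the heap to grow (the kGrow template argument) and retry.
        if ((p = tryToAllocate(size, /* grow= */ true)) != null) return p;
        // 5. Last resort before an OOME: a GC that clears SoftReferences, then one final retry.
        runGc(/* clearSoftReferences= */ true);
        if ((p = tryToAllocate(size, true)) != null) return p;
        throw new OutOfMemoryError("Failed to allocate " + size + " bytes");
    }

    // Hypothetical stubs; the real work happens in native code.
    private Object tryToAllocate(long size, boolean grow) { return null; }
    private boolean runGc(boolean clearSoftReferences) { return true; }
    private int remainingGcPlanSize() { return 2; }
}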

During the GC, ProcessReferences is called to process the Reference instances:

art/runtime/gc/reference_processor.cc

// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  if (kIsDebugBuild && collector->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Unless required to clear soft references with white references, preserve some white referents.
  if (!clear_soft_references) {  // Not clearing SoftReferences.
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional
    // mark if the SoftReference is supposed to be preserved.
    // Mark the referents of the SoftReferences in soft_reference_queue_ so they will not be cleared.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer referent reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this for only concurrent
    // could result in a stale is_marked_callback_ being called before the reference processing
    // starts since there is a small window of time where slow_path_enabled_ is enabled but the
    // callback isn't yet set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

This function is called during the marking phase. Based on the clear_soft_references parameter, it decides whether to mark the referents of the SoftReferences that have been enqueued into the ReferenceQueue: when the parameter is true they are not marked, when it is false they are marked (via ForwardSoftReferences). Marked referents are preserved, that is, not cleared by the GC. So clear_soft_references is, as its name suggests, the switch that controls whether SoftReferences get cleared.

art/runtime/gc/reference_queue.cc

void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
      // Referent is white, clear it.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      // Enqueue into cleared_references.
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref);
  }
}

art/runtime/mirror/reference.h

  template<bool kTransactionActive>
  void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
    // Set the volatile referent field declared in Reference.java to null.
    SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
  }

art/runtime/gc/reference_queue.cc

void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, everything that has been inserted must already be at least
    // gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}
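The cleared_references queue built up by EnqueueReference is eventually handed back to the Java side, where references that were registered with a java.lang.ref.ReferenceQueue are enqueued for the application to observe. Below is a minimal example of watching for that (illustrative only; whether and when the reference is enqueued depends entirely on memory pressure):

import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;

public class ClearedReferenceObserver {
    public static void main(String[] args) {
        ReferenceQueue<byte[]> queue = new ReferenceQueue<>();
        SoftReference<byte[]> ref = new SoftReference<>(new byte[1024 * 1024], queue);

        // ... allocate heavily elsewhere until the GC clears the referent ...

        // After the GC clears the referent, ref.get() returns null and the reference
        // is handed to the queue, where the application can poll for it.
        Reference<? extends byte[]> cleared = queue.poll();
        if (cleared != null) {
            System.out.println("SoftReference cleared and enqueued; get() == null: " + (ref.get() == null));
        }
    }
}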

Summary

The code analysis above confirms the conclusion: the referent of a SoftReference is indeed reclaimed by the GC only when an OutOfMemoryError is about to be thrown.

The ideal use case for SoftReference is a cache: we would like the cached objects to stay in memory, but when memory is tight we allow the VM to reclaim them rather than crash with an OutOfMemoryError, and a reclaimed cache entry does not need to be reloaded immediately.
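Here is a minimal sketch of such a cache, assuming the values can always be recomputed or reloaded through a caller-supplied loader (the class and method names are illustrative, not from any library):

import java.lang.ref.SoftReference;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Entries stay in memory while the heap has room; under memory pressure the GC may clear
// them instead of letting the process die with an OutOfMemoryError.
public class SoftCache<K, V> {
    private final Map<K, SoftReference<V>> map = new HashMap<>();
    private final Function<K, V> loader;

    public SoftCache(Function<K, V> loader) {
        this.loader = loader;
    }

    public synchronized V get(K key) {
        SoftReference<V> ref = map.get(key);
        V value = (ref != null) ? ref.get() : null;
        if (value == null) {
            // Either never cached or cleared by the GC: reload and cache again.
            value = loader.apply(key);
            map.put(key, new SoftReference<>(value));
        }
        return value;
    }
}

For example, a thumbnail cache could be created as new SoftCache<String, Bitmap>(path -> decodeThumbnail(path)), where decodeThumbnail is whatever loader the app already has.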
