/*
 * This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 * This file is licensed to You under the Eclipse Public License (EPL);
 * You may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/eclipse-1.0.php
 *
 * See the COPYRIGHT.txt file distributed with this work for information
 * regarding copyright ownership.
 */
package org.mmtk.policy.immix;

import static org.mmtk.policy.immix.ImmixConstants.*;

import org.mmtk.plan.Plan;
import org.mmtk.plan.TransitiveClosure;
import org.mmtk.policy.Space;
import org.mmtk.utility.heap.*;
import org.mmtk.utility.options.LineReuseRatio;
import org.mmtk.utility.options.Options;
import org.mmtk.utility.Constants;
import org.mmtk.utility.ForwardingWord;
import org.mmtk.utility.HeaderByte;
import org.mmtk.utility.Log;

import org.mmtk.vm.Lock;
import org.mmtk.vm.VM;

import org.vmmagic.pragma.*;
import org.vmmagic.unboxed.*;

/**
 * Each instance of this class corresponds to one immix <b>space</b>.
 * Each of the instance methods of this class may be called by any
 * thread (i.e. synchronization must be explicit in any instance or
 * class method).  This contrasts with SquishLocal, whose
 * instances correspond to <i>plan</i> instances and therefore to kernel
 * threads; thus, unlike this class, synchronization is not necessary
 * in the instance methods of SquishLocal.
 *
 */
@Uninterruptible
public final class ImmixSpace extends Space implements Constants {

  /****************************************************************************
   *
   * Class variables
   */
  /**
   * The block mark state threshold at or below which a partially marked
   * block is considered reusable; derived from the lineReuseRatio option.
   */
  private static short reusableMarkStateThreshold = 0;

  /****************************************************************************
   *
   * Instance variables
   */
  /**
   * The current object mark state for this space.
   */
  private byte markState = ObjectHeader.MARK_BASE_VALUE;
  byte lineMarkState = RESET_LINE_MARK_STATE;
  private byte lineUnavailState = RESET_LINE_MARK_STATE;
  private boolean inCollection;
  private int linesConsumed = 0;

  private Lock mutatorLock = VM.newLock(getName() + "mutator");
  private Lock gcLock = VM.newLock(getName() + "gc");

  private Address allocBlockCursor = Address.zero();
  private Address allocBlockSentinel = Address.zero();
  private boolean exhaustedReusableSpace = true;

  private final ChunkList chunkMap = new ChunkList();
  private final Defrag defrag;

  /****************************************************************************
   *
   * Initialization
   */

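  /*
   * Derive the block reuse threshold from the line reuse ratio.  As an
   * illustration only (the exact constants depend on the build): with a
   * line reuse ratio of 0.99 and a MAX_BLOCK_MARK_STATE of 127, the
   * threshold would be (short) (0.99 * 127) = 125, so only blocks with at
   * most 125 marked lines would be treated as reusable.
   */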
  static {
    Options.lineReuseRatio = new LineReuseRatio();
    reusableMarkStateThreshold = (short) (Options.lineReuseRatio.getValue() * MAX_BLOCK_MARK_STATE);
  }

  /**
   * The caller specifies the region of virtual memory to be used for
   * this space.  If this region conflicts with an existing space,
   * then the constructor will fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param vmRequest The virtual memory request
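   *
   * <p>A typical instantiation from a plan might look like the following
   * (an illustrative sketch only; treat {@code VMRequest.create()} as an
   * assumed default-request factory):
   * <pre>{@code
   * ImmixSpace immixSpace = new ImmixSpace("immix", VMRequest.create());
   * }</pre>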
   */
  public ImmixSpace(String name, VMRequest vmRequest) {
    this(name, true, vmRequest);
  }

  /**
   * The caller specifies the region of virtual memory to be used for
   * this space.  If this region conflicts with an existing space,
   * then the constructor will fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param zeroed if true, allocations return zeroed memory
   * @param vmRequest The virtual memory request
   */
  public ImmixSpace(String name, boolean zeroed, VMRequest vmRequest) {
    super(name, false, false, zeroed, vmRequest);
    if (vmRequest.isDiscontiguous())
      pr = new FreeListPageResource(this, Chunk.getRequiredMetaDataPages());
    else
      pr = new FreeListPageResource(this, start, extent, Chunk.getRequiredMetaDataPages());
    defrag = new Defrag((FreeListPageResource) pr);
  }

  /****************************************************************************
   *
   * Global prepare and release
   */

  /**
   * Prepare for a new collection increment.
   *
   * @param majorGC whether this increment is part of a whole-heap (major) collection
   */
  public void prepare(boolean majorGC) {
    if (majorGC) {
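      /* advance to fresh object and line mark states, so that marks from the previous cycle become stale */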
      markState = ObjectHeader.deltaMarkState(markState, true);
      lineMarkState++;
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(lineMarkState <= MAX_LINE_MARK_STATE);
    }
    chunkMap.reset();
    defrag.prepare(chunkMap, this);
    inCollection = true;

    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(VM.activePlan.collectorCount() <= MAX_COLLECTORS);
  }

  /**
   * A new collection increment has completed.  Release global resources.
   *
   * @param majorGC whether the completed increment was part of a whole-heap (major) collection
   * @return whether this collection increment performed any defragmentation
   */
  public boolean release(boolean majorGC) {
    boolean didDefrag = defrag.inDefrag();
    if (majorGC) {
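      /* wrap the line mark state at its maximum, and make this cycle's line
         marks the "unavailable" state consulted when lines are reused */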
      if (lineMarkState == MAX_LINE_MARK_STATE)
        lineMarkState = RESET_LINE_MARK_STATE;
      lineUnavailState = lineMarkState;
    }
    chunkMap.reset();
    defrag.globalRelease();
    inCollection = false;

    /* set up reusable space */
    if (allocBlockCursor.isZero()) allocBlockCursor = chunkMap.getHeadChunk();
    allocBlockSentinel = allocBlockCursor;
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isRecycleAllocChunkAligned(allocBlockSentinel));
    exhaustedReusableSpace = allocBlockCursor.isZero();
    if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
      Log.write("gr[allocBlockCursor: "); Log.write(allocBlockCursor); Log.write(" allocBlockSentinel: "); Log.write(allocBlockSentinel); Log.writeln("]");
    }

    /* really just want this to happen once after options are booted, but no harm in re-doing it */
    reusableMarkStateThreshold = (short) (Options.lineReuseRatio.getValue() * MAX_BLOCK_MARK_STATE);
    Defrag.defragReusableMarkStateThreshold = (short) (Options.defragLineReuseRatio.getValue() * MAX_BLOCK_MARK_STATE);

    linesConsumed = 0;
    return didDefrag;
  }

  /**
   * Determine the collection kind.
   *
   * @param emergencyCollection Is this collection an emergency (last did not yield enough)?
   * @param collectWholeHeap Is this a whole heap collection?
   * @param collectionAttempt Which attempt is this to collect?
   * @param userTriggeredCollection Was this collection requested by the user?
   */
  public void decideWhetherToDefrag(boolean emergencyCollection, boolean collectWholeHeap, int collectionAttempt, boolean userTriggeredCollection) {
    defrag.decideWhetherToDefrag(emergencyCollection, collectWholeHeap, collectionAttempt, userTriggeredCollection, exhaustedReusableSpace);
  }

  /**
   * Return the amount of headroom required to allow defrag, so this can be included in a collection reserve.
   *
   * @return The number of pages.
   */
  public int defragHeadroomPages() {
    return defrag.getDefragHeadroomPages();
  }

  /****************************************************************************
   *
   * Collection state access methods
   */

  /**
   * Return {@code true} if this space is currently being collected.
   *
   * @return {@code true} if this space is currently being collected.
   */
  @Inline
  public boolean inImmixCollection() {
    return inCollection;
  }

  /**
   * Return {@code true} if this space is currently being defragmented.
   *
   * @return {@code true} if this space is currently being defragmented.
   */
  @Inline
  public boolean inImmixDefragCollection() {
    return inCollection && defrag.inDefrag();
  }

  /**
   * Return the number of pages allocated since the last collection
   *
   * @return The number of pages allocated since the last collection
   */
  public int getPagesAllocated() {
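    // Convert consumed lines to pages via a shift.  As a worked example
    // (assuming the common 4KB pages, LOG_BYTES_IN_PAGE = 12, and 256-byte
    // immix lines, LOG_BYTES_IN_LINE = 8): the shift is 4, i.e. 16 lines
    // per page, so 160 consumed lines report as 10 pages.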
    return linesConsumed >> (LOG_BYTES_IN_PAGE - LOG_BYTES_IN_LINE);
  }

  /**
   * Return the reusable mark state threshold, which determines how
   * eagerly lines should be recycled (by default these values are
   * set so that all lines are recycled).
   *
   * @param forDefrag Whether the query is made in the context of a defragmenting collection
   * @return The reusable mark state threshold
   */
  @Inline
  public static short getReusuableMarkStateThreshold(boolean forDefrag) {
    return forDefrag ? Defrag.defragReusableMarkStateThreshold : reusableMarkStateThreshold;
  }

  /****************************************************************************
   *
   * Allocation
   */

  /**
   * Return a pointer to a set of new usable blocks, or zero if none are available.
   * Use different block selection heuristics depending on whether the allocation
   * request is "hot" or "cold".
   *
   * @param hot True if the requesting context is for hot allocations (used for
   * allocations from high allocation volume sites).
   * @param copy True if space is being acquired on behalf of a copying (defrag) allocator.
   * @param lineUseCount The number of lines consumed by the requester, counted
   * toward this space's page-consumption bookkeeping.
   * @return The address of the acquired block, or {@code Address.zero()} if no
   * blocks are available.
   */
  public Address getSpace(boolean hot, boolean copy, int lineUseCount) {
    Address rtn;
    if (copy)
      defrag.getBlock();

    linesConsumed += lineUseCount;

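    /* acquire one block's worth of pages from the underlying page resource */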
    rtn = acquire(PAGES_IN_BLOCK);

    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(Block.isAligned(rtn));
      VM.assertions._assert(!(copy && Block.isDefragSource(rtn)));
    }

    if (!rtn.isZero()) {
      Block.setBlockAsInUse(rtn);
      Chunk.updateHighWater(rtn);
      if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
        Log.write("gs["); Log.write(rtn); Log.write(" -> "); Log.write(rtn.plus(BYTES_IN_BLOCK - 1)); Log.write(" copy: "); Log.write(copy); Log.writeln("]");
      }
    }

    return rtn;
  }

  @Override
  public void growSpace(Address start, Extent bytes, boolean newChunk) {
    super.growSpace(start, bytes, newChunk);
    if (newChunk) {
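      /* identify the chunk into which the space just grew, and initialize its per-chunk metadata */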
      Address chunk = chunkAlign(start.plus(bytes), true);
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(chunkAlign(start.plus(bytes), true).EQ(chunk));
      Chunk.clearMetaData(chunk);
      chunkMap.addNewChunkToMap(chunk);
    }
  }

  public Address acquireReusableBlocks() {
    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(isRecycleAllocChunkAligned(allocBlockCursor));
      VM.assertions._assert(isRecycleAllocChunkAligned(allocBlockSentinel));
    }
    Address rtn;

    lock();
    if (exhaustedReusableSpace)
      rtn = Address.zero();
    else {
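      /* Hand out the recycle-alloc chunk at the cursor, then advance the cursor,
         moving on to the next chunk in the map once the current chunk's high
         water mark is passed.  Reaching the sentinel (or running off the end of
         the map) means every reusable region has been offered once. */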
      rtn = allocBlockCursor;
      Address lastAllocChunk = chunkAlign(allocBlockCursor, true);
      allocBlockCursor = allocBlockCursor.plus(BYTES_IN_RECYCLE_ALLOC_CHUNK);
      if (allocBlockCursor.GT(Chunk.getHighWater(lastAllocChunk)))
        allocBlockCursor = chunkMap.nextChunk(lastAllocChunk);
      if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
        Log.write("arb[ rtn: "); Log.write(rtn); Log.write(" allocBlockCursor: "); Log.write(allocBlockCursor); Log.write(" allocBlockSentinel: "); Log.write(allocBlockSentinel); Log.writeln("]");
      }

      if (allocBlockCursor.isZero() || allocBlockCursor.EQ(allocBlockSentinel)) {
        exhaustedReusableSpace = true;
        if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
          Log.writeln("[Reusable space exhausted]");
        }
      }
    }
    unlock();
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isRecycleAllocChunkAligned(rtn));
    return rtn;
  }

  /**
   * Release a block.  The block is free, so call the underlying page allocator
   * to release the associated storage.
   *
   * @param block The address of the block to be released
   */
  @Override
  @Inline
  public void release(Address block) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(Block.isAligned(block));
    Block.setBlockAsUnallocated(block);
    ((FreeListPageResource) pr).releasePages(block);
  }

  /**
   * {@inheritDoc}<p>
   *
   * This hook is called by the page level allocators whenever a
   * complete discontiguous chunk is released.
   */
  @Override
  public int releaseDiscontiguousChunks(Address chunk) {
    chunkMap.removeChunkFromMap(chunk);
    return super.releaseDiscontiguousChunks(chunk);
  }

  /****************************************************************************
   *
   * Header manipulation
   */

  /**
   * Perform any required post allocation initialization
   *
   * @param object the object ref to the storage to be initialized
   * @param bytes the size of the newly allocated object, in bytes
   */
  @Inline
  public void postAlloc(ObjectReference object, int bytes) {
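    /* record objects that span (straddle) more than one line, so that
       markLines can later mark every line the object touches */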
    if (bytes > BYTES_IN_LINE)
      ObjectHeader.markAsStraddling(object);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(ObjectHeader.isNewObject(object));
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!ForwardingWord.isForwardedOrBeingForwarded(object));
  }

  /**
   * Perform any required post copy (i.e. in-GC allocation) initialization.
   * This is relevant (for example) when Squish is used as the mature space in
   * a copying GC.
   *
   * @param object the object ref to the storage to be initialized
   * @param bytes the size of the copied object, in bytes
   * @param majorGC Is this copy happening during a major gc?
   */
  @Inline
  public void postCopy(ObjectReference object, int bytes, boolean majorGC) {
    ObjectHeader.writeMarkState(object, markState, bytes > BYTES_IN_LINE);
    if (!MARK_LINE_AT_SCAN_TIME && majorGC) markLines(object);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!ForwardingWord.isForwardedOrBeingForwarded(object));
    if (VM.VERIFY_ASSERTIONS && HeaderByte.NEEDS_UNLOGGED_BIT) VM.assertions._assert(HeaderByte.isUnlogged(object));
  }

  /****************************************************************************
   *
   * Object tracing
   */

  /**
   * Trace a reference to an object.  If the object header is not already
   * marked, mark the object and enqueue it for subsequent processing.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @param allocator The allocator to which any copying should be directed
   * @return The object, which may have been moved.
   */
  @Inline
  public ObjectReference traceObject(TransitiveClosure trace, ObjectReference object, int allocator) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(defrag.determined(true));

    ObjectReference rtn = object;
    if (isDefragSource(object))
      rtn = traceObjectWithOpportunisticCopy(trace, object, allocator, false);
    else
      traceObjectWithoutMoving(trace, object);

    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(!rtn.isNull());
      VM.assertions._assert(defrag.spaceExhausted() || !isDefragSource(rtn) || (ObjectHeader.isPinnedObject(rtn)));
    }
    return rtn;
  }

  /**
   * Trace a reference to an object in the context of a non-moving collection.  This
   * call is optimized for the simpler non-moving case.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @return The object (there is no object forwarding in this
   * trace method, so we always return the same object: this could be a
   * void method but for compliance to a more general interface).
   */
  @Inline
  public ObjectReference fastTraceObject(TransitiveClosure trace, ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(defrag.determined(false));
    traceObjectWithoutMoving(trace, object);
    return object;
  }

  /**
   * Trace a reference to an object during a nursery collection for
   * a sticky mark bits implementation of immix.  If the object header
   * is not already marked, mark the object and enqueue it for subsequent
   * processing.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @param allocator The allocator to which any copying should be directed
   * @return Either the object or a forwarded object, depending on
   * the policy in place.
   */
  @Inline
  public ObjectReference nurseryTraceObject(TransitiveClosure trace, ObjectReference object, int allocator) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!defrag.inDefrag());
    if (ObjectHeader.isMatureObject(object))
      return object;
    else if (PREFER_COPY_ON_NURSERY_GC)
      return traceObjectWithOpportunisticCopy(trace, object, allocator, true);
    else
      return fastTraceObject(trace, object);
  }

  /**
   * Trace a reference to an object.  This interface is not supported by immix, since
   * we require the allocator to be identified except for the special case of the fast
   * trace.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @return {@code null}, although in practice this is never reached because the assertion failure aborts first.
   */
  @Override
  public ObjectReference traceObject(TransitiveClosure trace, ObjectReference object) {
    VM.assertions.fail("unsupported interface");
    return null;
  }

  /**
   * Trace a reference to an object in the context of a non-moving collection.  This
   * call is optimized for the simpler non-moving case.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   */
  @Inline
  private void traceObjectWithoutMoving(TransitiveClosure trace, ObjectReference object) {
    byte markValue = markState;
    byte oldMarkState = ObjectHeader.testAndMark(object, markValue);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!defrag.inDefrag() || defrag.spaceExhausted() || !isDefragSource(object));
    if (oldMarkState != markValue) {
      if (!MARK_LINE_AT_SCAN_TIME)
        markLines(object);
      trace.processNode(object);
    }
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!ForwardingWord.isForwardedOrBeingForwarded(object));
    if (VM.VERIFY_ASSERTIONS && HeaderByte.NEEDS_UNLOGGED_BIT) VM.assertions._assert(HeaderByte.isUnlogged(object));
  }

  /**
   * Trace a reference to an object, forwarding the object if appropriate.
   * If the object is not already marked, mark the object and enqueue it
   * for subsequent processing.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @param allocator The allocator to which any copying should be directed
   * @return Either the object or a forwarded object, if it was forwarded.
   */
  @Inline
  private ObjectReference traceObjectWithOpportunisticCopy(TransitiveClosure trace, ObjectReference object, int allocator, boolean nurseryCollection) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert((nurseryCollection && !ObjectHeader.isMatureObject(object)) || (defrag.determined(true) && isDefragSource(object)));

    /* Race to be the (potential) forwarder */
    Word priorStatusWord = ForwardingWord.attemptToForward(object);
    if (ForwardingWord.stateIsForwardedOrBeingForwarded(priorStatusWord)) {
      /* We lost the race; the object is either forwarded or being forwarded by another thread. */
      /* Note that the concurrent attempt to forward the object may fail, so the object may remain in-place */
      ObjectReference rtn = ForwardingWord.spinAndGetForwardedObject(object, priorStatusWord);
      if (VM.VERIFY_ASSERTIONS && rtn == object) VM.assertions._assert((nurseryCollection && ObjectHeader.testMarkState(object, markState)) || defrag.spaceExhausted() || ObjectHeader.isPinnedObject(object));
      if (VM.VERIFY_ASSERTIONS && rtn != object) VM.assertions._assert(nurseryCollection || !isDefragSource(rtn));
      if (VM.VERIFY_ASSERTIONS && HeaderByte.NEEDS_UNLOGGED_BIT) VM.assertions._assert(HeaderByte.isUnlogged(rtn));
      return rtn;
    } else {
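      /* the low byte of the prior status word is the object's GC state byte, as
         observed at the point we attempted to claim the forwarding right */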
      byte priorState = (byte) (priorStatusWord.toInt() & 0xFF);
      /* the object is unforwarded, either because this is the first thread to reach it, or because the object can't be forwarded */
      if (ObjectHeader.testMarkState(priorState, markState)) {
        /* the object has not been forwarded, but has the correct mark state; unlock and return unmoved object */
        /* Note that in a sticky mark bits collector, the mark state does not change at each GC, so correct mark state does not imply another thread got there first */
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(nurseryCollection || defrag.spaceExhausted() || ObjectHeader.isPinnedObject(object));
        ObjectHeader.returnToPriorStateAndEnsureUnlogged(object, priorState); // return to uncontested state
        if (VM.VERIFY_ASSERTIONS && Plan.NEEDS_LOG_BIT_IN_HEADER) VM.assertions._assert(HeaderByte.isUnlogged(object));
        return object;
      } else {
        /* we are the first to reach the object; either mark in place or forward it */
        ObjectReference newObject;
        if (ObjectHeader.isPinnedObject(object) || (!nurseryCollection && defrag.spaceExhausted())) {
          /* mark in place */
          ObjectHeader.setMarkStateUnlogAndUnlock(object, priorState, markState);
          newObject = object;
          if (VM.VERIFY_ASSERTIONS && Plan.NEEDS_LOG_BIT_IN_HEADER) VM.assertions._assert(HeaderByte.isUnlogged(newObject));
        } else {
          /* forward */
          if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!ObjectHeader.isPinnedObject(object));
          newObject = ForwardingWord.forwardObject(object, allocator);
          if (VM.VERIFY_ASSERTIONS && Plan.NEEDS_LOG_BIT_IN_HEADER) VM.assertions._assert(HeaderByte.isUnlogged(newObject));
        }
        if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
          Log.write("C["); Log.write(object); Log.write("/");
          Log.write(getName()); Log.write("] -> ");
          Log.write(newObject); Log.write("/");
          Log.write(Space.getSpaceForObject(newObject).getName());
          Log.writeln("]");
        }
        if (!MARK_LINE_AT_SCAN_TIME)
          markLines(newObject);
        trace.processNode(newObject);
        if (VM.VERIFY_ASSERTIONS) {
          if (!((getSpaceForObject(newObject) != this) ||
                (newObject == object) ||
                (nurseryCollection && willNotMoveThisNurseryGC(newObject)) ||
                (defrag.inDefrag() && willNotMoveThisGC(newObject))
              )) {
            Log.write(" object: "); Log.writeln(object);
            Log.write("newObject: "); Log.writeln(newObject);
            Log.write(" space: "); Log.writeln(getName());
            Log.write(" nursery?: "); Log.writeln(nurseryCollection);
            Log.write(" mature?: "); Log.writeln(ObjectHeader.isMatureObject(object));
            Log.write(" wnmngc?: "); Log.writeln(willNotMoveThisNurseryGC(newObject));
            Log.write(" pinned?: "); Log.writeln(ObjectHeader.isPinnedObject(object));
            Space otherSpace = getSpaceForObject(newObject);
            Log.write(" space(o): "); Log.writeln(otherSpace == null ? "<NULL>" : otherSpace.getName());
            VM.assertions._assert(false);
          }
        }
        return newObject;
      }
    }
  }

  /**
   * Mark the line(s) associated with a given object.  This is distinct from the
   * tracing code above because line marks are stored separately from the
   * object headers (thus both must be set), and also because we found empirically
   * that it was more efficient to perform the line mark of the object during
   * the scan phase (which occurs after the trace phase), presumably because
   * the latency of the associated memory operations was better hidden in the
   * context of that code.
   *
   * @param object The object which is live and for which the associated lines
   * must be marked.
   */
  public void markLines(ObjectReference object) {
    Address address = VM.objectModel.objectStartRef(object);
    Line.mark(address, lineMarkState);
    if (ObjectHeader.isStraddlingObject(object))
      Line.markMultiLine(address, object, lineMarkState);
  }

  public int getNextUnavailableLine(Address baseLineAvailAddress, int line) {
    return Line.getNextUnavailable(baseLineAvailAddress, line, lineUnavailState);
  }

  public int getNextAvailableLine(Address baseLineAvailAddress, int line) {
    return Line.getNextAvailable(baseLineAvailAddress, line, lineUnavailState);
  }

  /****************************************************************************
   *
   * Establish available lines
   */

  /**
   * Establish the number of recyclable lines available for allocation
   * during defragmentation, populating the spillAvailHistogram, which buckets
   * available lines according to the number of holes on the block on which
   * the available lines reside.
   *
   * @param spillAvailHistogram A histogram of availability to be populated
   * @return The number of available recyclable lines
   */
  int getAvailableLines(int[] spillAvailHistogram) {
    int availableLines;
    if (allocBlockCursor.isZero() || exhaustedReusableSpace) {
      availableLines = 0;
    } else {
      if (allocBlockCursor.EQ(allocBlockSentinel)) {
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!exhaustedReusableSpace);
        allocBlockCursor = chunkMap.getHeadChunk();
        allocBlockSentinel = allocBlockCursor;
      }
      availableLines = getUsableLinesInRegion(allocBlockCursor, allocBlockSentinel, spillAvailHistogram);
    }
    return availableLines;
  }

  /**
   * Return the number of lines usable for allocation during defragmentation in the
   * address range specified by start and end.  Populate a histogram to indicate where
   * the usable lines reside as a function of block hole count.
   *
   * @param start The start of the region to be checked for availability
   * @param end The end of the region to be checked for availability
   * @param spillAvailHistogram The histogram which will be populated
   * @return The number of usable lines
   */
  private int getUsableLinesInRegion(Address start, Address end, int[] spillAvailHistogram) {
    int usableLines = 0;
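    /* the first blocks of each chunk hold per-chunk metadata, so when starting
       at a chunk boundary, skip forward to the first usable block */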
    Address blockCursor = Chunk.isAligned(start) ? start.plus(Chunk.FIRST_USABLE_BLOCK_INDEX << LOG_BYTES_IN_BLOCK) : start;
    Address blockStateCursor = Block.getBlockMarkStateAddress(blockCursor);
    Address chunkCursor = Chunk.align(blockCursor);
    if (Chunk.getByteOffset(end) < Chunk.FIRST_USABLE_BLOCK_INDEX << LOG_BYTES_IN_BLOCK)
      end = Chunk.align(end).plus(Chunk.FIRST_USABLE_BLOCK_INDEX << LOG_BYTES_IN_BLOCK);

    for (int i = 0; i <= MAX_CONSV_SPILL_COUNT; i++) spillAvailHistogram[i] = 0;

    Address highwater = Chunk.getHighWater(chunkCursor);
    do {
      short markState = blockStateCursor.loadShort();
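      /* a block's mark state counts its marked lines; a partially marked block
         at or below the reuse threshold contributes its unmarked lines as usable */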
      if (markState != 0 && markState <= reusableMarkStateThreshold) {
        int usable = LINES_IN_BLOCK - markState;
        short bucket = Block.getConservativeSpillCount(blockCursor);
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(bucket >= 0 && bucket <= MAX_CONSV_SPILL_COUNT);
        spillAvailHistogram[bucket] += usable;
        usableLines += usable;
      }
      blockCursor = blockCursor.plus(BYTES_IN_BLOCK);
      if (blockCursor.GT(highwater)) {
        chunkCursor = chunkMap.nextChunk(chunkCursor);
        if (chunkCursor.isZero()) break;
        blockCursor = chunkCursor.plus(Chunk.FIRST_USABLE_BLOCK_INDEX << LOG_BYTES_IN_BLOCK);
        blockStateCursor = Block.getBlockMarkStateAddress(blockCursor);
        highwater = Chunk.getHighWater(chunkCursor);
      } else
        blockStateCursor = blockStateCursor.plus(Block.BYTES_IN_BLOCK_STATE_ENTRY);
    } while (blockCursor.NE(end));

    return usableLines;
  }

  /****************************************************************************
   *
   * Object state
   */

  /**
   * Generic test of the liveness of an object
   *
   * @param object The object in question
   * @return {@code true} if this object is known to be live (i.e. it is marked)
   */
  @Override
  @Inline
  public boolean isLive(ObjectReference object) {
    if (defrag.inDefrag() && isDefragSource(object))
      return ForwardingWord.isForwardedOrBeingForwarded(object) || ObjectHeader.testMarkState(object, markState);
    else
      return ObjectHeader.testMarkState(object, markState);
  }

  /**
   * Test the liveness of an object during copying sticky mark bits collection
   *
   * @param object The object in question
   * @return True if this object is known to be live (i.e. it is marked)
   */
  @Inline
  public boolean copyNurseryIsLive(ObjectReference object) {
    return ForwardingWord.isForwardedOrBeingForwarded(object) || ObjectHeader.testMarkState(object, markState);
  }

  /**
   * Test the liveness of an object during a non-defragmenting collection
   *
   * @param object The object in question
   * @return {@code true} if this object is known to be live (i.e. it is marked)
   */
  @Inline
  public boolean fastIsLive(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!defrag.inDefrag());
    return ObjectHeader.testMarkState(object, markState);
  }

  @Inline
  public boolean willNotMoveThisGC(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(getSpaceForObject(object) == this && defrag.inDefrag());
    return ObjectHeader.isPinnedObject(object) || willNotMoveThisGC(VM.objectModel.refToAddress(object));
  }

  @Inline
  public boolean willNotMoveThisNurseryGC(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(getSpaceForObject(object) == this);
    return ObjectHeader.isMatureObject(object);
  }

  @Inline
  private boolean isDefragSource(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(getSpaceForObject(object) == this);
    return isDefragSource(VM.objectModel.refToAddress(object));
  }

  @Inline
  public boolean willNotMoveThisGC(Address address) {
    return !defrag.inDefrag() || defrag.spaceExhausted() || !isDefragSource(address);
  }

  @Inline
  public boolean isDefragSource(Address address) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(getSpaceForObject(address.toObjectReference()) == this);
    return Block.isDefragSource(address);
  }


  /****************************************************************************
   *
   * Locks
   */

  /**
   * Acquire the appropriate lock depending on whether the context is
   * GC or mutator.
   */
  private void lock() {
    if (inCollection)
      gcLock.acquire();
    else
      mutatorLock.acquire();
  }

  /**
   * Release the appropriate lock depending on whether the context is
   * GC or mutator.
   */
  private void unlock() {
    if (inCollection)
      gcLock.release();
    else
      mutatorLock.release();
  }


  /****************************************************************************
   *
   * Misc
   */

  /**
   * Check whether an address is aligned to a recycle-allocation chunk boundary.
   *
   * @param ptr the address to be checked
   * @return {@code true} if {@code ptr} is recycle-alloc-chunk aligned
   */
  public static boolean isRecycleAllocChunkAligned(Address ptr) {
    return ptr.toWord().and(RECYCLE_ALLOC_CHUNK_MASK).EQ(Word.zero());
  }

  ChunkList getChunkMap() { return chunkMap; }
  Defrag getDefrag() { return defrag; }
}