Skip to content

Commit 2aa9c33

Browse files
authored
Merge pull request #2164 from SAP/pr-jdk-26+33
Merge to tag jdk-26+33
2 parents 845135c + 4cc4660 commit 2aa9c33

15 files changed

Lines changed: 499 additions & 114 deletions

File tree

doc/testing.html

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@ <h1 class="title">Testing the JDK</h1>
7272
<li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
7373
locale</a></li>
7474
<li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
75+
<li><a href="#sctp-tests" id="toc-sctp-tests">SCTP Tests</a></li>
7576
<li><a href="#testing-ahead-of-time-optimizations"
7677
id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
7778
Optimizations</a></li>
@@ -621,6 +622,21 @@ <h3 id="pkcs11-tests">PKCS11 Tests</h3>
621622
JTREG=&quot;JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs&quot;</code></pre>
622623
<p>For more notes about the PKCS11 tests, please refer to
623624
test/jdk/sun/security/pkcs11/README.</p>
625+
<h3 id="sctp-tests">SCTP Tests</h3>
626+
<p>The SCTP tests require the SCTP runtime library, which is often not
627+
installed by default in popular Linux distributions. Without this
628+
library, the SCTP tests will be skipped. If you want to enable the SCTP
629+
tests, you should install the SCTP library before running the tests.</p>
630+
<p>For distributions using the .deb packaging format and the apt tool
631+
(such as Debian, Ubuntu, etc.), try this:</p>
632+
<pre><code>sudo apt install libsctp1
633+
sudo modprobe sctp
634+
lsmod | grep sctp</code></pre>
635+
<p>For distributions using the .rpm packaging format and the dnf tool
636+
(such as Fedora, Red Hat, etc.), try this:</p>
637+
<pre><code>sudo dnf install -y lksctp-tools
638+
sudo modprobe sctp
639+
lsmod | grep sctp</code></pre>
624640
<h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
625641
Optimizations</h3>
626642
<p>One way to improve test coverage of ahead-of-time (AOT) optimizations

doc/testing.md

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
640640
For more notes about the PKCS11 tests, please refer to
641641
test/jdk/sun/security/pkcs11/README.
642642

643+
644+
### SCTP Tests
645+
646+
The SCTP tests require the SCTP runtime library, which is often not installed
647+
by default in popular Linux distributions. Without this library, the SCTP tests
648+
will be skipped. If you want to enable the SCTP tests, you should install the
649+
SCTP library before running the tests.
650+
651+
For distributions using the .deb packaging format and the apt tool
652+
(such as Debian, Ubuntu, etc.), try this:
653+
654+
```
655+
sudo apt install libsctp1
656+
sudo modprobe sctp
657+
lsmod | grep sctp
658+
```
659+
660+
For distributions using the .rpm packaging format and the dnf tool
661+
(such as Fedora, Red Hat, etc.), try this:
662+
663+
```
664+
sudo dnf install -y lksctp-tools
665+
sudo modprobe sctp
666+
lsmod | grep sctp
667+
```
668+
643669
### Testing Ahead-of-time Optimizations
644670

645671
One way to improve test coverage of ahead-of-time (AOT) optimizations in

src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,11 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
7070

7171
template <DecoratorSet decorators, typename T>
7272
inline void G1BarrierSet::write_ref_field_post(T* field) {
73-
volatile CardValue* byte = _card_table->byte_for(field);
73+
// Make sure that the card table reference is read only once. Otherwise the compiler
74+
// might reload that value in the two accesses below, which could cause writes to
75+
// the wrong card table.
76+
CardTable* card_table = AtomicAccess::load(&_card_table);
77+
CardValue* byte = card_table->byte_for(field);
7478
if (*byte == G1CardTable::clean_card_val()) {
7579
*byte = G1CardTable::dirty_card_val();
7680
}

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1267,7 +1267,6 @@ class G1CollectedHeap : public CollectedHeap {
12671267

12681268
bool is_marked(oop obj) const;
12691269

1270-
inline static bool is_obj_filler(const oop obj);
12711270
// Determine if an object is dead, given the object and also
12721271
// the region to which the object belongs.
12731272
inline bool is_obj_dead(const oop obj, const G1HeapRegion* hr) const;

src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@
3838
#include "gc/g1/g1Policy.hpp"
3939
#include "gc/g1/g1RegionPinCache.inline.hpp"
4040
#include "gc/g1/g1RemSet.hpp"
41+
#include "gc/shared/collectedHeap.inline.hpp"
4142
#include "gc/shared/markBitMap.inline.hpp"
4243
#include "gc/shared/taskqueue.inline.hpp"
4344
#include "oops/stackChunkOop.hpp"
@@ -230,16 +231,11 @@ inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
230231
return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check
231232
}
232233

233-
inline bool G1CollectedHeap::is_obj_filler(const oop obj) {
234-
Klass* k = obj->klass_without_asserts();
235-
return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
236-
}
237-
238234
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const G1HeapRegion* hr) const {
239235
assert(!hr->is_free(), "looking up obj " PTR_FORMAT " in Free region %u", p2i(obj), hr->hrm_index());
240236
if (hr->is_in_parsable_area(obj)) {
241237
// This object is in the parsable part of the heap, live unless scrubbed.
242-
return is_obj_filler(obj);
238+
return is_filler_object(obj);
243239
} else {
244240
// From Remark until a region has been concurrently scrubbed, parts of the
245241
// region is not guaranteed to be parsable. Use the bitmap for liveness.

src/hotspot/share/gc/parallel/psParallelCompact.cpp

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
4444
#include "gc/parallel/psStringDedup.hpp"
4545
#include "gc/parallel/psYoungGen.hpp"
4646
#include "gc/shared/classUnloadingContext.hpp"
47+
#include "gc/shared/collectedHeap.inline.hpp"
4748
#include "gc/shared/fullGCForwarding.inline.hpp"
4849
#include "gc/shared/gcCause.hpp"
4950
#include "gc/shared/gcHeapSummary.hpp"
@@ -932,6 +933,17 @@ void PSParallelCompact::summary_phase(bool should_do_max_compaction)
932933
}
933934
}
934935

936+
void PSParallelCompact::report_object_count_after_gc() {
937+
GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
938+
// The heap is compacted, so all objects are iterable. However, there may be
939+
// filler objects in the heap which we should ignore.
940+
class SkipFillerObjectClosure : public BoolObjectClosure {
941+
public:
942+
bool do_object_b(oop obj) override { return !CollectedHeap::is_filler_object(obj); }
943+
} cl;
944+
_gc_tracer.report_object_count_after_gc(&cl, &ParallelScavengeHeap::heap()->workers());
945+
}
946+
935947
bool PSParallelCompact::invoke(bool clear_all_soft_refs, bool should_do_max_compaction) {
936948
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
937949
assert(Thread::current() == (Thread*)VMThread::vm_thread(),
@@ -1027,6 +1039,8 @@ bool PSParallelCompact::invoke(bool clear_all_soft_refs, bool should_do_max_comp
10271039

10281040
heap->print_heap_change(pre_gc_values);
10291041

1042+
report_object_count_after_gc();
1043+
10301044
// Track memory usage and detect low memory
10311045
MemoryService::track_memory_usage();
10321046
heap->update_counters();
@@ -1274,10 +1288,6 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
12741288
}
12751289
}
12761290

1277-
{
1278-
GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
1279-
_gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
1280-
}
12811291
#if TASKQUEUE_STATS
12821292
ParCompactionManager::print_and_reset_taskqueue_stats();
12831293
#endif
@@ -1835,8 +1845,7 @@ void PSParallelCompact::verify_filler_in_dense_prefix() {
18351845
oop obj = cast_to_oop(cur_addr);
18361846
oopDesc::verify(obj);
18371847
if (!mark_bitmap()->is_marked(cur_addr)) {
1838-
Klass* k = cast_to_oop(cur_addr)->klass();
1839-
assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
1848+
assert(CollectedHeap::is_filler_object(cast_to_oop(cur_addr)), "inv");
18401849
}
18411850
cur_addr += obj->size();
18421851
}

src/hotspot/share/gc/parallel/psParallelCompact.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -749,6 +749,7 @@ class PSParallelCompact : AllStatic {
749749
// Move objects to new locations.
750750
static void compact();
751751

752+
static void report_object_count_after_gc();
752753
// Add available regions to the stack and draining tasks to the task queue.
753754
static void prepare_region_draining_tasks(uint parallel_gc_threads);
754755

src/hotspot/share/gc/shared/collectedHeap.hpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -309,6 +309,8 @@ class CollectedHeap : public CHeapObj<mtGC> {
309309
fill_with_object(start, pointer_delta(end, start), zap);
310310
}
311311

312+
inline static bool is_filler_object(oop obj);
313+
312314
virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
313315
static size_t min_dummy_object_size() {
314316
return oopDesc::header_size();

src/hotspot/share/gc/shared/collectedHeap.inline.hpp

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,9 @@
2727

2828
#include "gc/shared/collectedHeap.hpp"
2929

30+
#include "classfile/vmClasses.hpp"
3031
#include "gc/shared/memAllocator.hpp"
32+
#include "memory/universe.hpp"
3133
#include "oops/oop.inline.hpp"
3234
#include "utilities/align.hpp"
3335

@@ -50,4 +52,9 @@ inline void CollectedHeap::add_vmthread_cpu_time(jlong time) {
5052
_vmthread_cpu_time += time;
5153
}
5254

55+
inline bool CollectedHeap::is_filler_object(oop obj) {
56+
Klass* k = obj->klass_without_asserts();
57+
return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
58+
}
59+
5360
#endif // SHARE_GC_SHARED_COLLECTEDHEAP_INLINE_HPP

src/hotspot/share/opto/subnode.cpp

Lines changed: 35 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -745,71 +745,40 @@ const Type* CmpINode::Value(PhaseGVN* phase) const {
745745

746746
// Simplify a CmpU (compare 2 integers) node, based on local information.
747747
// If both inputs are constants, compare them.
748-
const Type *CmpUNode::sub( const Type *t1, const Type *t2 ) const {
749-
assert(!t1->isa_ptr(), "obsolete usage of CmpU");
748+
const Type* CmpUNode::sub(const Type* t1, const Type* t2) const {
749+
const TypeInt* r0 = t1->is_int();
750+
const TypeInt* r1 = t2->is_int();
750751

751-
// comparing two unsigned ints
752-
const TypeInt *r0 = t1->is_int(); // Handy access
753-
const TypeInt *r1 = t2->is_int();
754-
755-
// Current installed version
756-
// Compare ranges for non-overlap
757-
juint lo0 = r0->_lo;
758-
juint hi0 = r0->_hi;
759-
juint lo1 = r1->_lo;
760-
juint hi1 = r1->_hi;
761-
762-
// If either one has both negative and positive values,
763-
// it therefore contains both 0 and -1, and since [0..-1] is the
764-
// full unsigned range, the type must act as an unsigned bottom.
765-
bool bot0 = ((jint)(lo0 ^ hi0) < 0);
766-
bool bot1 = ((jint)(lo1 ^ hi1) < 0);
767-
768-
if (bot0 || bot1) {
769-
// All unsigned values are LE -1 and GE 0.
770-
if (lo0 == 0 && hi0 == 0) {
771-
return TypeInt::CC_LE; // 0 <= bot
772-
} else if ((jint)lo0 == -1 && (jint)hi0 == -1) {
773-
return TypeInt::CC_GE; // -1 >= bot
774-
} else if (lo1 == 0 && hi1 == 0) {
775-
return TypeInt::CC_GE; // bot >= 0
776-
} else if ((jint)lo1 == -1 && (jint)hi1 == -1) {
777-
return TypeInt::CC_LE; // bot <= -1
778-
}
779-
} else {
780-
// We can use ranges of the form [lo..hi] if signs are the same.
781-
assert(lo0 <= hi0 && lo1 <= hi1, "unsigned ranges are valid");
782-
// results are reversed, '-' > '+' for unsigned compare
783-
if (hi0 < lo1) {
784-
return TypeInt::CC_LT; // smaller
785-
} else if (lo0 > hi1) {
786-
return TypeInt::CC_GT; // greater
787-
} else if (hi0 == lo1 && lo0 == hi1) {
788-
return TypeInt::CC_EQ; // Equal results
789-
} else if (lo0 >= hi1) {
790-
return TypeInt::CC_GE;
791-
} else if (hi0 <= lo1) {
792-
// Check for special case in Hashtable::get. (See below.)
793-
if ((jint)lo0 >= 0 && (jint)lo1 >= 0 && is_index_range_check())
794-
return TypeInt::CC_LT;
795-
return TypeInt::CC_LE;
796-
}
797-
}
798752
// Check for special case in Hashtable::get - the hash index is
799753
// mod'ed to the table size so the following range check is useless.
800754
// Check for: (X Mod Y) CmpU Y, where the mod result and Y both have
801755
// to be positive.
802756
// (This is a gross hack, since the sub method never
803757
// looks at the structure of the node in any other case.)
804-
if ((jint)lo0 >= 0 && (jint)lo1 >= 0 && is_index_range_check())
758+
if (r0->_lo >= 0 && r1->_lo >= 0 && is_index_range_check()) {
805759
return TypeInt::CC_LT;
760+
}
761+
762+
if (r0->_uhi < r1->_ulo) {
763+
return TypeInt::CC_LT;
764+
} else if (r0->_ulo > r1->_uhi) {
765+
return TypeInt::CC_GT;
766+
} else if (r0->is_con() && r1->is_con()) {
767+
// Since r0->_ulo == r0->_uhi == r0->get_con(), we only reach here if the constants are equal
768+
assert(r0->get_con() == r1->get_con(), "must reach a previous branch otherwise");
769+
return TypeInt::CC_EQ;
770+
} else if (r0->_uhi == r1->_ulo) {
771+
return TypeInt::CC_LE;
772+
} else if (r0->_ulo == r1->_uhi) {
773+
return TypeInt::CC_GE;
774+
}
806775

807776
const Type* joined = r0->join(r1);
808777
if (joined == Type::TOP) {
809778
return TypeInt::CC_NE;
810779
}
811780

812-
return TypeInt::CC; // else use worst case results
781+
return TypeInt::CC;
813782
}
814783

815784
const Type* CmpUNode::Value(PhaseGVN* phase) const {
@@ -963,59 +932,29 @@ const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
963932
// Simplify a CmpUL (compare 2 unsigned longs) node, based on local information.
964933
// If both inputs are constants, compare them.
965934
const Type* CmpULNode::sub(const Type* t1, const Type* t2) const {
966-
assert(!t1->isa_ptr(), "obsolete usage of CmpUL");
967-
968-
// comparing two unsigned longs
969-
const TypeLong* r0 = t1->is_long(); // Handy access
935+
const TypeLong* r0 = t1->is_long();
970936
const TypeLong* r1 = t2->is_long();
971937

972-
// Current installed version
973-
// Compare ranges for non-overlap
974-
julong lo0 = r0->_lo;
975-
julong hi0 = r0->_hi;
976-
julong lo1 = r1->_lo;
977-
julong hi1 = r1->_hi;
978-
979-
// If either one has both negative and positive values,
980-
// it therefore contains both 0 and -1, and since [0..-1] is the
981-
// full unsigned range, the type must act as an unsigned bottom.
982-
bool bot0 = ((jlong)(lo0 ^ hi0) < 0);
983-
bool bot1 = ((jlong)(lo1 ^ hi1) < 0);
984-
985-
if (bot0 || bot1) {
986-
// All unsigned values are LE -1 and GE 0.
987-
if (lo0 == 0 && hi0 == 0) {
988-
return TypeInt::CC_LE; // 0 <= bot
989-
} else if ((jlong)lo0 == -1 && (jlong)hi0 == -1) {
990-
return TypeInt::CC_GE; // -1 >= bot
991-
} else if (lo1 == 0 && hi1 == 0) {
992-
return TypeInt::CC_GE; // bot >= 0
993-
} else if ((jlong)lo1 == -1 && (jlong)hi1 == -1) {
994-
return TypeInt::CC_LE; // bot <= -1
995-
}
996-
} else {
997-
// We can use ranges of the form [lo..hi] if signs are the same.
998-
assert(lo0 <= hi0 && lo1 <= hi1, "unsigned ranges are valid");
999-
// results are reversed, '-' > '+' for unsigned compare
1000-
if (hi0 < lo1) {
1001-
return TypeInt::CC_LT; // smaller
1002-
} else if (lo0 > hi1) {
1003-
return TypeInt::CC_GT; // greater
1004-
} else if (hi0 == lo1 && lo0 == hi1) {
1005-
return TypeInt::CC_EQ; // Equal results
1006-
} else if (lo0 >= hi1) {
1007-
return TypeInt::CC_GE;
1008-
} else if (hi0 <= lo1) {
1009-
return TypeInt::CC_LE;
1010-
}
938+
if (r0->_uhi < r1->_ulo) {
939+
return TypeInt::CC_LT;
940+
} else if (r0->_ulo > r1->_uhi) {
941+
return TypeInt::CC_GT;
942+
} else if (r0->is_con() && r1->is_con()) {
943+
// Since r0->_ulo == r0->_uhi == r0->get_con(), we only reach here if the constants are equal
944+
assert(r0->get_con() == r1->get_con(), "must reach a previous branch otherwise");
945+
return TypeInt::CC_EQ;
946+
} else if (r0->_uhi == r1->_ulo) {
947+
return TypeInt::CC_LE;
948+
} else if (r0->_ulo == r1->_uhi) {
949+
return TypeInt::CC_GE;
1011950
}
1012951

1013952
const Type* joined = r0->join(r1);
1014953
if (joined == Type::TOP) {
1015954
return TypeInt::CC_NE;
1016955
}
1017956

1018-
return TypeInt::CC; // else use worst case results
957+
return TypeInt::CC;
1019958
}
1020959

1021960
//=============================================================================

0 commit comments

Comments
 (0)