diff --git a/.github/workflows/run-jdk-compliance-tests.yml b/.github/workflows/run-jdk-compliance-tests.yml index b5f7f425c9..45bece0b4d 100644 --- a/.github/workflows/run-jdk-compliance-tests.yml +++ b/.github/workflows/run-jdk-compliance-tests.yml @@ -22,6 +22,13 @@ jobs: os: [ubuntu-20.04, macos-11] scala: [3.3.0] java: [11, 17] + include: + - java: 17 + scala: 2.13.11 + os: ubuntu-20.04 + - java: 17 + scala: 2.12.17 + os: macos-11 steps: - uses: actions/checkout@v3 - uses: ./.github/actions/macos-setup-env diff --git a/docs/changelog/0.4.15.md b/docs/changelog/0.4.15.md new file mode 100644 index 0000000000..5c1e730858 --- /dev/null +++ b/docs/changelog/0.4.15.md @@ -0,0 +1,178 @@ + +# 0.4.15 (2023-09-01) + +We're happy to announce the release of Scala Native 0.4.15, which is the next maintenance release and includes mostly bug fixes and implements some of the missing JDK methods. + +We encourage you to test out the next major version nightlies available - 0.5.0-SNAPSHOT to catch the remaining multithreading issues before the final release. + + +Scala standard library used by this release is based on the following versions: + + + + + + + + + + + + + + + + + + + +
| Scala binary version | Scala release |
| -------------------- | ------------- |
| 2.12                 | 2.12.18       |
| 2.13                 | 2.13.11       |
| 3                    | 3.3.0         |

| Commits since last release | 48 |
| -------------------------- | -- |
| Merged PRs                 | 47 |
| Contributors               | 8  |
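To pick up this release in an sbt build, the usual plugin wiring applies. A minimal sketch, assuming a standard sbt project layout (the nightlies mentioned above use the same coordinates with version `0.5.0-SNAPSHOT`):

```scala
// project/plugins.sbt -- use the 0.4.15 sbt plugin and toolchain
addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.4.15")

// build.sbt -- enable Scala Native on the module to be compiled to a native binary
enablePlugins(ScalaNativePlugin)
```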
+ +## Contributors + +Big thanks to everybody who contributed to this release or reported an issue! + +``` +$ git shortlog -sn --no-merges v0.4.14..v0.4.15 + 27 LeeTibbert + 11 Wojciech Mazur + 3 Eric K Richardson + 2 Rikito Taniguchi + 2 Yifei Zhou + 1 Arman Bilge + 1 kim / Motoyuki Kimura + 1 spamegg +``` + +## Merged PRs + +## [v0.4.15](https://github.com/scala-native/scala-native/tree/v0.4.15) (2023-09-01) + +[Full Changelog](https://github.com/scala-native/scala-native/compare/v0.4.14...v0.4.15) + +**Merged pull requests:** + +## Java Standard Library +- Fix #3307, #3315: javalib *Stream.iterate characteristics now match JVM + [\#3317](https://github.com/scala-native/scala-native/pull/3317) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3333: javalib FileInputStream#available now matches JVM + [\#3338](https://github.com/scala-native/scala-native/pull/3338) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Replace all runtime platform checks with linktime conditions + [\#3335](https://github.com/scala-native/scala-native/pull/3335) + ([armanbilge](https://github.com/armanbilge)) +- Fix #3308, #3350: better reporting of javalib stream & spliterator characteristics + [\#3354](https://github.com/scala-native/scala-native/pull/3354) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix Subclassed `WeakReference` to be GCed + [\#3347](https://github.com/scala-native/scala-native/pull/3347) + ([mox692](https://github.com/mox692)) +- Fix #3329: javalib MappedByteBufferImpl no longer calls FileChannel truncate method + [\#3345](https://github.com/scala-native/scala-native/pull/3345) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3351: javalib LinkedList#spliterator now reports ORDERED characteristic. + [\#3361](https://github.com/scala-native/scala-native/pull/3361) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3340: javalib MappedByteBuffer now handles 0 byte ranges + [\#3360](https://github.com/scala-native/scala-native/pull/3360) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3316: javalib FileChannel append behavior now matches a JVM + [\#3368](https://github.com/scala-native/scala-native/pull/3368) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3352: javalib {Stream, DoubleStream}#sorted characteristics now match JVM + [\#3366](https://github.com/scala-native/scala-native/pull/3366) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3369: fix three defects in javalib FileChannel write methods + [\#3370](https://github.com/scala-native/scala-native/pull/3370) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3376: javalib {Stream#of, DoubleStream#of} characteristics now match a JVM + [\#3377](https://github.com/scala-native/scala-native/pull/3377) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3309: javalib stream limit methods now match Java 8 + [\#3390](https://github.com/scala-native/scala-native/pull/3390) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Add partial implementation of java.util.EnumSet + [\#3397](https://github.com/scala-native/scala-native/pull/3397) + ([WojciechMazur](https://github.com/WojciechMazur)) +- Implement & test javalib Random doubleStream methods + [\#3402](https://github.com/scala-native/scala-native/pull/3402) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Port `java.util.Vector` from Apache Harmony + [\#3403](https://github.com/scala-native/scala-native/pull/3403) + ([WojciechMazur](https://github.com/WojciechMazur)) +- Correct defect in javalib ThreadLocalRandom doubleStreams 
forEachRemaining + [\#3406](https://github.com/scala-native/scala-native/pull/3406) + ([LeeTibbert](https://github.com/LeeTibbert)) +- javalib Random class now uses a better spliterator for Streams + [\#3405](https://github.com/scala-native/scala-native/pull/3405) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Make UUID.compareTo() consistent with the JVM. + [\#3413](https://github.com/scala-native/scala-native/pull/3413) + ([Bensonater](https://github.com/Bensonater)) +- Support java.util.StringJoiner + [\#3396](https://github.com/scala-native/scala-native/pull/3396) + ([spamegg1](https://github.com/spamegg1)) +- Fix #3409: Remove defects from Collectors#joining method + [\#3421](https://github.com/scala-native/scala-native/pull/3421) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Evolve new javalib StringJoiner class + [\#3422](https://github.com/scala-native/scala-native/pull/3422) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3408: Implement two javalib static String join() methods. + [\#3420](https://github.com/scala-native/scala-native/pull/3420) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3426: Towards a better javalib Stream.toArray(generator) + [\#3428](https://github.com/scala-native/scala-native/pull/3428) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3417: Remove maxDepth related defects from javalib Files methods + [\#3430](https://github.com/scala-native/scala-native/pull/3430) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3431: javalib Matcher.reset(input) now updates underlying regex. + [\#3432](https://github.com/scala-native/scala-native/pull/3432) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3378: javalib {Stream, DoubleStream}#sorted now delays actual sort to a terminal operation + [\#3434](https://github.com/scala-native/scala-native/pull/3434) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3439: Implement javalib sequential setAll methods + [\#3441](https://github.com/scala-native/scala-native/pull/3441) + ([LeeTibbert](https://github.com/LeeTibbert)) +- Fix #3440: Provide a restricted implementation of javalib Array parallel methods + [\#3445](https://github.com/scala-native/scala-native/pull/3445) + ([LeeTibbert](https://github.com/LeeTibbert)) + +## Scala Native runtime +- Add `atRawUnsafe` and `atUnsafe` to array classes + [\#3327](https://github.com/scala-native/scala-native/pull/3327) + ([armanbilge](https://github.com/armanbilge)) + +## Scala Native toolchain +- Detect block cycles using stackalloc op + [\#3416](https://github.com/scala-native/scala-native/pull/3416) + ([WojciechMazur](https://github.com/WojciechMazur)) +- Fixing Bug in Lowering: Handling Op.Copy for ClassOf Transformation + [\#3447](https://github.com/scala-native/scala-native/pull/3447) + ([tanishiking](https://github.com/tanishiking)) + + +## Documentation +- Create native code page and add forward links + [\#3462](https://github.com/scala-native/scala-native/pull/3462) + ([ekrich](https://github.com/ekrich)) diff --git a/docs/changelog/index.rst b/docs/changelog/index.rst index 30cc812ce9..3608fd0184 100644 --- a/docs/changelog/index.rst +++ b/docs/changelog/index.rst @@ -6,6 +6,7 @@ Changelog .. toctree:: :maxdepth: 1 + 0.4.15 0.4.14 0.4.13 0.4.12 diff --git a/docs/conf.py b/docs/conf.py index 074389c321..4dd866427d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -69,9 +69,9 @@ def generateScalaNativeCurrentYear(): # built documents. # # The short X.Y version. 
-version = u'0.4.14' +version = u'0.4.15' # The full version, including alpha/beta/rc tags. -release = u'0.4.14' +release = u'0.4.15' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/lib/javalib.rst b/docs/lib/javalib.rst index c78d8241af..f3486a3992 100644 --- a/docs/lib/javalib.rst +++ b/docs/lib/javalib.rst @@ -447,6 +447,7 @@ java.util * ``SortedSet`` * ``Spliterator`` * ``Spliterators`` +* ``StringJoiner`` * ``StringTokenizer`` * ``TooManyListenersException`` * ``TreeSet`` @@ -549,7 +550,7 @@ java.util.zip * ``ZipInputStream`` * ``ZipOutputStream`` - + **Note:** This is an ongoing effort, some of the classes listed here might be partially implemented. Please consult `javalib sources `_ @@ -568,7 +569,7 @@ Some notes on the implementation: 1. The included RE2 implements a Unicode version lower than the version used in the Scala Native Character class (>= 7.0.0). The RE2 Unicode version is in the 6.n range. For reference, Java 8 - released with Unicode 6.2.0. + released with Unicode 6.2.0. The RE2 implemented may not match codepoints added or changed in later Unicode versions. Similarly, there may be slight differences diff --git a/docs/user/index.rst b/docs/user/index.rst index 3be5845267..f86f91ec0f 100644 --- a/docs/user/index.rst +++ b/docs/user/index.rst @@ -10,6 +10,8 @@ User's Guide sbt lang interop + native testing profiling runtime + diff --git a/docs/user/interop.rst b/docs/user/interop.rst index d68c01fe01..52a5f20d90 100644 --- a/docs/user/interop.rst +++ b/docs/user/interop.rst @@ -511,4 +511,4 @@ using ``byteValue.toUByte``, ``shortValue.toUShort``, ``intValue.toUInt``, ``lon and conversely ``unsignedByteValue.toByte``, ``unsignedShortValue.toShort``, ``unsignedIntValue.toInt``, ``unsignedLongValue.toLong``. -Continue to :ref:`lib`. +Continue to :ref:`native`. diff --git a/docs/user/native.rst b/docs/user/native.rst new file mode 100644 index 0000000000..1bee69d7d3 --- /dev/null +++ b/docs/user/native.rst @@ -0,0 +1,87 @@ +.. _native: + +Native Code in your Application or Library +========================================== + +Scala Native uses native C and C++ code to interact with the underlying +platform and operating system. Since the tool chain compiles and links +the Scala Native system, it can also compile and link C and C++ code +included in an application project or a library that supports Scala +Native that includes C and/or C++ source code. + +Supported file extensions for native code are `.c`, `.cpp`, and `.S`. + +Note that `.S` files or assembly code is not portable across different CPU +architectures so conditional compilation would be needed to support +more than one architecture. You can also include header files with +the extensions `.h` and `.hpp`. + +Applications with Native Code +----------------------------- + +In order to create standalone native projects with native code use the +following procedure. You can start with the basic Scala Native template. + +Add C/C++ code into `src/main/resources/scala-native`. The code can be put in +subdirectories as desired inside the `scala-native` directory. As an example, +create a file named `myapi.c` and put it into your `scala-native` directory +as described above. + +.. code-block:: c + + long long add3(long long in) { return in + 3; } + +Next, create a main file as follows: + +.. 
code-block:: scala + + import scalanative.unsafe._ + + @extern + object myapi { + def add3(in: CLongLong): CLongLong = extern + } + + object Main { + import myapi._ + def main(args: Array[String]): Unit = { + val res = add3(-3L) + assert(res == 0L) + println(s"Add3 to -3 = $res") + } + } + +Finally, compile and run this like a normal Scala Native application. + +Using libraries with Native Code +------------------------------------------ + +Libraries developed to target the Scala Native platform +can have C, C++, or assembly files included in the dependency. The code is +added to `src/main/resources/scala-native` and is published like a normal +Scala library. The code can be put in subdirectories as desired inside the +`scala-native` directory. These libraries can also be cross built to +support Scala/JVM or Scala.js if the Native portions have replacement +code on the respective platforms. + +The primary purpose of this feature is to allow libraries to support +Scala Native that need native "glue" code to operate. The current +C interopt does not allow direct access to macro defined constants and +functions or allow passing "struct"s from the stack to C functions. +Future versions of Scala Native may relax these restrictions making +this feature obsolete. + +Note: This feature is not a replacement for developing or distributing +native C/C++ libraries and should not be used for this purpose. + +If the dependency contains native code, Scala Native will identify the +library as a dependency that has native code and will unpack the library. +Next, it will compile, link, and optimize any native code along with the +Scala Native runtime and your application code. No additional information +is needed in the build file other than the normal dependency so it is +transparent to the library user. + +Using a library that contains native code can be used in combination with +the feature above that allows native code in your application. + +Continue to :ref:`testing`. diff --git a/docs/user/profiling.rst b/docs/user/profiling.rst index 5e5e403f79..286ff7f152 100644 --- a/docs/user/profiling.rst +++ b/docs/user/profiling.rst @@ -77,3 +77,5 @@ A `flamegraph `_ is a visualizatio * Open the file ``kernel.svg`` in your browser and you can zoom in the interactive SVG-file by clicking on the colored boxes as explained `here `_. A box represents a stack frame. The broader a box is the more CPU cycles have been spent. The higher the box is, the deeper in the call-chain it is. * The perf option ``-F 1000`` means that the sampling frequency is set to 1000 Hz. You can experiment with changing this option to get the right accuracy; start with e.g. ``-F 99`` and see what you get. You can then increase the sampling frequency to see if more details adds interesting information. + +Continue to :ref:`runtime`. diff --git a/docs/user/runtime.rst b/docs/user/runtime.rst index 045e307c7a..17c4d754d9 100644 --- a/docs/user/runtime.rst +++ b/docs/user/runtime.rst @@ -71,4 +71,4 @@ your executable as needed: $ export SCALANATIVE_MIN_SIZE=2m; export SCALANATIVE_MAX_SIZE=1m; sandbox/.2.13/target/scala-2.13/sandbox-out SCALANATIVE_MAX_HEAP_SIZE should be at least SCALANATIVE_MIN_HEAP_SIZE - +Continue to :ref:`lib`. diff --git a/docs/user/sbt.rst b/docs/user/sbt.rst index 143b961a96..7e98f54ef1 100644 --- a/docs/user/sbt.rst +++ b/docs/user/sbt.rst @@ -329,91 +329,6 @@ package resolution system. .. _sonatype: https://github.com/xerial/sbt-sonatype .. 
_bintray: https://github.com/sbt/sbt-bintray -Including Native Code in your Application or Library ----------------------------------------------------- - -Scala Native uses native C and C++ code to interact with the underlying -platform and operating system. Since the tool chain compiles and links -the Scala Native system, it can also compile and link C and C++ code -included in an application project or a library that supports Scala -Native that includes C and/or C++ source code. - -Supported file extensions for native code are `.c`, `.cpp`, and `.S`. - -Note that `.S` files or assembly code is not portable across different CPU -architectures so conditional compilation would be needed to support -more than one architecture. You can also include header files with -the extensions `.h` and `.hpp`. - -Applications with Native Code ------------------------------ - -In order to create standalone native projects with native code use the -following procedure. You can start with the basic Scala Native template. - -Add C/C++ code into `src/main/resources/scala-native`. The code can be put in -subdirectories as desired inside the `scala-native` directory. As an example, -create a file named `myapi.c` and put it into your `scala-native` directory -as described above. - -.. code-block:: c - - long long add3(long long in) { return in + 3; } - -Next, create a main file as follows: - -.. code-block:: scala - - import scalanative.unsafe._ - - @extern - object myapi { - def add3(in: CLongLong): CLongLong = extern - } - - object Main { - import myapi._ - def main(args: Array[String]): Unit = { - val res = add3(-3L) - assert(res == 0L) - println(s"Add3 to -3 = $res") - } - } - -Finally, compile and run this like a normal Scala Native application. - - -Using libraries with Native Code ------------------------------------------- - -Libraries developed to target the Scala Native platform -can have C, C++, or assembly files included in the dependency. The code is -added to `src/main/resources/scala-native` and is published like a normal -Scala library. The code can be put in subdirectories as desired inside the -`scala-native` directory. These libraries can also be cross built to -support Scala/JVM or Scala.js if the Native portions have replacement -code on the respective platforms. - -The primary purpose of this feature is to allow libraries to support -Scala Native that need native "glue" code to operate. The current -C interopt does not allow direct access to macro defined constants and -functions or allow passing "struct"s from the stack to C functions. -Future versions of Scala Native may relax these restrictions making -this feature obsolete. - -Note: This feature is not a replacement for developing or distributing -native C/C++ libraries and should not be used for this purpose. - -If the dependency contains native code, Scala Native will identify the -library as a dependency that has native code and will unpack the library. -Next, it will compile, link, and optimize any native code along with the -Scala Native runtime and your application code. No additional information -is needed in the build file other than the normal dependency so it is -transparent to the library user. - -This feature can be used in combination with the feature above that -allows native code in your application. 
- Cross compilation ----------------- diff --git a/docs/user/testing.rst b/docs/user/testing.rst index 52381750fc..d5016287bf 100644 --- a/docs/user/testing.rst +++ b/docs/user/testing.rst @@ -49,3 +49,5 @@ You may also use `testOnly` to run a particular test, for example: testOnly MyTest testOnly MyTest.superComplicatedTest + +Continue to :ref:`profiling`. diff --git a/javalib/src/main/scala/java/io/File.scala b/javalib/src/main/scala/java/io/File.scala index 639c591812..b20cfa1108 100644 --- a/javalib/src/main/scala/java/io/File.scala +++ b/javalib/src/main/scala/java/io/File.scala @@ -838,8 +838,10 @@ object File { // found an absolute path. continue from there. case link if link(0) == separatorChar => - if (Platform.isWindows() && strncmp(link, c"\\\\?\\", 4.toUInt) == 0) - path + if (isWindows) + if (strncmp(link, c"\\\\?\\", 4.toUInt) == 0) + path + else resolveLink(link, resolveAbsolute, restart = resolveAbsolute) else resolveLink(link, resolveAbsolute, restart = resolveAbsolute) @@ -940,13 +942,13 @@ object File { } } - val pathSeparatorChar: Char = if (Platform.isWindows()) ';' else ':' + val pathSeparatorChar: Char = if (isWindows) ';' else ':' val pathSeparator: String = pathSeparatorChar.toString - val separatorChar: Char = if (Platform.isWindows()) '\\' else '/' + val separatorChar: Char = if (isWindows) '\\' else '/' val separator: String = separatorChar.toString private var counter: Int = 0 private var counterBase: Int = 0 - private val caseSensitive: Boolean = !Platform.isWindows() + private val caseSensitive: Boolean = !isWindows def listRoots(): Array[File] = { val list = new java.util.ArrayList[File]() diff --git a/javalib/src/main/scala/java/lang/String.scala b/javalib/src/main/scala/java/lang/String.scala index cccb66d20c..620a7625e6 100644 --- a/javalib/src/main/scala/java/lang/String.scala +++ b/javalib/src/main/scala/java/lang/String.scala @@ -10,6 +10,7 @@ import java.util.regex._ import java.nio._ import java.nio.charset._ import java.util.Objects +import java.util.ScalaOps._ import java.lang.constant.{Constable, ConstantDesc} import scala.annotation.{switch, tailrec} import _String.{string2_string, _string2string} @@ -1545,6 +1546,30 @@ object _String { def copyValueOf(data: Array[Char]): _String = new _String(data, 0, data.length) + def format(fmt: _String, args: Array[AnyRef]): _String = + new Formatter().format(fmt, args).toString + + def format(loc: Locale, fmt: _String, args: Array[AnyRef]): _String = + new Formatter(loc).format(fmt, args).toString() + + def join(delimiter: CharSequence, elements: Array[CharSequence]): String = { + val sj = new StringJoiner(delimiter) + + for (j <- 0 until elements.length) + sj.add(elements(j)) + + sj.toString() + } + + def join( + delimiter: CharSequence, + elements: Iterable[CharSequence] + ): String = { + elements.scalaOps + .foldLeft(new StringJoiner(delimiter))((j, e) => j.add(e)) + .toString() + } + def valueOf(data: Array[Char]): _String = new _String(data) def valueOf(data: Array[Char], start: Int, length: Int): _String = @@ -1571,12 +1596,6 @@ object _String { def valueOf(value: AnyRef): _String = if (value != null) value.toString else "null" - def format(fmt: _String, args: Array[AnyRef]): _String = - new Formatter().format(fmt, args).toString - - def format(loc: Locale, fmt: _String, args: Array[AnyRef]): _String = - new Formatter(loc).format(fmt, args).toString() - import scala.language.implicitConversions @inline private[lang] implicit def _string2string(s: _String): String = s.asInstanceOf[String] diff --git 
a/javalib/src/main/scala/java/lang/System.scala b/javalib/src/main/scala/java/lang/System.scala index 0987792769..3988ef6f18 100644 --- a/javalib/src/main/scala/java/lang/System.scala +++ b/javalib/src/main/scala/java/lang/System.scala @@ -92,7 +92,7 @@ object System { new PrintStream(new FileOutputStream(FileDescriptor.err)) def lineSeparator(): String = { - if (Platform.isWindows()) "\r\n" + if (isWindows) "\r\n" else "\n" } diff --git a/javalib/src/main/scala/java/lang/Thread.scala b/javalib/src/main/scala/java/lang/Thread.scala index d36c964b6a..c06e7f2d57 100644 --- a/javalib/src/main/scala/java/lang/Thread.scala +++ b/javalib/src/main/scala/java/lang/Thread.scala @@ -10,6 +10,10 @@ class Thread private (runnable: Runnable) extends Runnable { private var interruptedState = false private[this] var name: String = "main" // default name of the main thread + private[java] var threadLocalRandomSeed: scala.Long = 0 + private[java] var threadLocalRandomProbe: Int = 0 + private[java] var threadLocalRandomSecondarySeed: Int = 0 + def run(): Unit = () def interrupt(): Unit = diff --git a/javalib/src/main/scala/java/net/NetworkInterface.scala b/javalib/src/main/scala/java/net/NetworkInterface.scala index 437c66c33d..3d16fc712e 100644 --- a/javalib/src/main/scala/java/net/NetworkInterface.scala +++ b/javalib/src/main/scala/java/net/NetworkInterface.scala @@ -25,7 +25,6 @@ import scala.scalanative.posix.string._ import scala.scalanative.posix.unistd import scala.scalanative.meta.LinktimeInfo -import scala.scalanative.runtime.Platform import macOsIf._ import macOsIfDl._ @@ -61,21 +60,21 @@ class NetworkInterface private (ifName: String) { def getDisplayName(): String = getName() def getHardwareAddress(): Array[Byte] = { - if (Platform.isWindows()) new Array[Byte](0) // No Windows support + if (LinktimeInfo.isWindows) new Array[Byte](0) // No Windows support else { NetworkInterface.unixImplGetHardwareAddress(ifName) } } def getIndex(): Int = { - if (Platform.isWindows()) 0 // No Windows support + if (LinktimeInfo.isWindows) 0 // No Windows support else { NetworkInterface.unixImplGetIndex(ifName) } } def getInetAddresses(): ju.Enumeration[InetAddress] = { - if (Platform.isWindows()) { // No Windows support + if (LinktimeInfo.isWindows) { // No Windows support ju.Collections.enumeration[InetAddress](new ju.ArrayList[InetAddress]) } else { NetworkInterface.unixImplGetInetAddresses(ifName) @@ -83,7 +82,7 @@ class NetworkInterface private (ifName: String) { } def getInterfaceAddresses(): ju.List[InterfaceAddress] = { - if (Platform.isWindows()) { // No Windows support + if (LinktimeInfo.isWindows) { // No Windows support ju.Collections.emptyList[InterfaceAddress]() } else { NetworkInterface.unixImplGetInterfaceAddresses(ifName) @@ -91,7 +90,7 @@ class NetworkInterface private (ifName: String) { } def getMTU(): Int = { - if (Platform.isWindows()) 0 // No Windows support + if (LinktimeInfo.isWindows) 0 // No Windows support else { NetworkInterface.unixImplGetIfMTU(ifName) } @@ -100,7 +99,7 @@ class NetworkInterface private (ifName: String) { def getName(): String = ifName def getParent(): NetworkInterface = { - if (Platform.isWindows()) null // No Windows support + if (LinktimeInfo.isWindows) null // No Windows support else if (!this.isVirtual()) null else { val parentName = ifName.split(":")(0) @@ -112,7 +111,7 @@ class NetworkInterface private (ifName: String) { val ifList = new ju.ArrayList[NetworkInterface]() // No Windows support, so empty Enumeration will be returned. 
- if (!Platform.isWindows()) { + if (!LinktimeInfo.isWindows) { val allIfs = NetworkInterface.getNetworkInterfaces() val matchMe = s"${ifName}:" while (allIfs.hasMoreElements()) { @@ -126,14 +125,15 @@ class NetworkInterface private (ifName: String) { } def inetAddresses(): Stream[InetAddress] = { - if (Platform.isWindows()) Stream.empty[InetAddress]() // No Windows support + if (LinktimeInfo.isWindows) + Stream.empty[InetAddress]() // No Windows support else { NetworkInterface.unixImplInetAddresses(ifName) } } def isLoopback(): Boolean = { - if (Platform.isWindows()) false // No Windows support + if (LinktimeInfo.isWindows) false // No Windows support else { val ifFlags = NetworkInterface.unixImplGetIfFlags(ifName) (ifFlags & unixIf.IFF_LOOPBACK) == unixIf.IFF_LOOPBACK @@ -141,7 +141,7 @@ class NetworkInterface private (ifName: String) { } def isPointToPoint(): Boolean = { - if (Platform.isWindows()) false // No Windows support + if (LinktimeInfo.isWindows) false // No Windows support else { val ifFlags = NetworkInterface.unixImplGetIfFlags(ifName) (ifFlags & unixIf.IFF_POINTOPOINT) == unixIf.IFF_POINTOPOINT @@ -149,7 +149,7 @@ class NetworkInterface private (ifName: String) { } def isUp(): Boolean = { - if (Platform.isWindows()) false // No Windows support + if (LinktimeInfo.isWindows) false // No Windows support else { val ifFlags = NetworkInterface.unixImplGetIfFlags(ifName) (ifFlags & unixIf.IFF_UP) == unixIf.IFF_UP @@ -168,7 +168,7 @@ class NetworkInterface private (ifName: String) { } def supportsMulticast(): Boolean = { - if (Platform.isWindows()) false // No Windows support + if (LinktimeInfo.isWindows) false // No Windows support else { val ifFlags = NetworkInterface.unixImplGetIfFlags(ifName) (ifFlags & unixIf.IFF_MULTICAST) == unixIf.IFF_MULTICAST @@ -187,7 +187,7 @@ object NetworkInterface { if (index < 0) throw new IllegalArgumentException("Interface index can't be negative") - if (Platform.isWindows()) { + if (LinktimeInfo.isWindows) { null } else { unixGetByIndex(index) @@ -196,7 +196,7 @@ object NetworkInterface { def getByInetAddress(addr: InetAddress): NetworkInterface = { Objects.requireNonNull(addr) - if (Platform.isWindows()) { + if (LinktimeInfo.isWindows) { null } else { unixGetByInetAddress(addr) @@ -205,7 +205,7 @@ object NetworkInterface { def getByName(name: String): NetworkInterface = { Objects.requireNonNull(name) - if (Platform.isWindows()) { + if (LinktimeInfo.isWindows) { null } else { unixGetByName(name) @@ -213,7 +213,7 @@ object NetworkInterface { } def getNetworkInterfaces(): ju.Enumeration[NetworkInterface] = { - if (Platform.isWindows()) { + if (LinktimeInfo.isWindows) { null } else { unixGetNetworkInterfaces() @@ -224,7 +224,7 @@ object NetworkInterface { * less clumsy than Enumerations. */ def networkInterfaces(): Stream[NetworkInterface] = { - if (Platform.isWindows()) { + if (LinktimeInfo.isWindows) { null } else { unixNetworkInterfaces() diff --git a/javalib/src/main/scala/java/nio/MappedByteBufferData.scala b/javalib/src/main/scala/java/nio/MappedByteBufferData.scala index 402733353e..683ab0ffba 100644 --- a/javalib/src/main/scala/java/nio/MappedByteBufferData.scala +++ b/javalib/src/main/scala/java/nio/MappedByteBufferData.scala @@ -45,14 +45,44 @@ private[nio] class MappedByteBufferData( private[nio] val windowsMappingHandle: Option[Handle] ) { + /* Create an "empty" instance for the special case of size == 0. + * This removes that complexity from the execution paths of the + * more frequently used size > 0 case. 
+ * + * Keep the nasty null confined to this file, so caller does not + * need to know about it. + * + * Execution should never reach update() or apply() (bb.get()). + * Code earlier in the execution chain should have detected and rejected + * an attempt to access an empty MappedByteBufferData instance. + * Have those two methods return "reasonable" values just in case. + * Could have thrown an Exception. Fielder's choice. + * + * Since it is never called, the return value for apply() is just to + * keep the compiler happy; it can be any Byte, zero seemed to make the + * most sense. Fielder's choice redux. + */ + def this() = { + this(MapMode.READ_ONLY, null, 0, None) + def force(): Unit = () // do nothing + def update(index: Int, value: Byte): Unit = () // do nothing + def apply(index: Int): Byte = 0 // Should never reach here + } + // Finalization. Unmapping is done on garbage collection, like on JVM. - private val selfWeakReference = new WeakReference(this) - new MappedByteBufferFinalizer( - selfWeakReference, - ptr, - length, - windowsMappingHandle - ) +// private val selfWeakReference = new WeakReference(this) + + if (ptr != null) { + // Finalization. Unmapping is done on garbage collection, like on JVM. + val selfWeakReference = new WeakReference(this) + + new MappedByteBufferFinalizer( + selfWeakReference, + ptr, + length, + windowsMappingHandle + ) + } def force(): Unit = { if (mode eq MapMode.READ_WRITE) { diff --git a/javalib/src/main/scala/java/nio/MappedByteBufferImpl.scala b/javalib/src/main/scala/java/nio/MappedByteBufferImpl.scala index 08f326fed7..38f173b1ab 100644 --- a/javalib/src/main/scala/java/nio/MappedByteBufferImpl.scala +++ b/javalib/src/main/scala/java/nio/MappedByteBufferImpl.scala @@ -324,33 +324,47 @@ private[nio] object MappedByteBufferImpl { new MappedByteBufferData(mode, ptr, size, None) } - def apply( - mode: MapMode, + private def mapData( position: Long, size: Int, fd: FileDescriptor, - channel: FileChannel - ): MappedByteBufferImpl = { - - // JVM resizes file to accomodate mapping - if (mode ne MapMode.READ_ONLY) { - val prevSize = channel.size() - val minSize = position + size - if (minSize > prevSize) { - val prevPosition = channel.position() - channel.truncate(minSize) - if (isWindows) { - channel.position(prevSize) - for (i <- prevSize until minSize) - channel.write(ByteBuffer.wrap(Array[Byte](0.toByte))) - channel.position(prevPosition) - } - } - } + mode: MapMode + ): MappedByteBufferData = { - val mappedData = + if (size > 0) { if (isWindows) mapWindows(position, size, fd, mode) else mapUnix(position, size, fd, mode) + } else { + /* Issue #3340 + * JVM silently succeeds on MappedByteBuffer creation and + * throws "IndexOutOfBoundsException" on access; get or put. + * + * Create and use an "empty" MappedByteBuffer so that Scala Native + * matches the JVM behavior. + * + * POSIX and most (all?) unix-like systems explicitly do not + * allow mapping zero bytes and mapUnix() will throw an Exception. + * + * On Windows, a request to map zero bytes causes the entire + * file to be mapped. At the least, expensive in I/O and memory + * for bytes which will never be used. The call to MapViewOfFile() + * in mapWindows() may or may not use the same semantics. Someone + * with Windows skills would have to check. Knowing the zero size, + * it is easier to match the JDK by creating an empty + * MappedByteBufferData on the Windows branch also. 
+ */ + new MappedByteBufferData() + } + } + + def apply( + mode: MapMode, + position: Long, + size: Int, + fd: FileDescriptor + ): MappedByteBufferImpl = { + + val mappedData = mapData(position, size, fd, mode) new MappedByteBufferImpl( mappedData.length, diff --git a/javalib/src/main/scala/java/nio/channels/FileChannel.scala b/javalib/src/main/scala/java/nio/channels/FileChannel.scala index f828e63432..414d9f75df 100644 --- a/javalib/src/main/scala/java/nio/channels/FileChannel.scala +++ b/javalib/src/main/scala/java/nio/channels/FileChannel.scala @@ -90,18 +90,21 @@ object FileChannel { ): FileChannel = { import StandardOpenOption._ - if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING)) { - throw new IllegalArgumentException( - "APPEND + TRUNCATE_EXISTING not allowed" - ) - } + val appending = options.contains(APPEND) + val writing = options.contains(WRITE) || appending + + if (appending) { + if (options.contains(TRUNCATE_EXISTING)) { + throw new IllegalArgumentException( + "APPEND + TRUNCATE_EXISTING not allowed" + ) + } - if (options.contains(APPEND) && options.contains(READ)) { - throw new IllegalArgumentException("APPEND + READ not allowed") + if (options.contains(READ)) { + throw new IllegalArgumentException("READ + APPEND not allowed") + } } - val writing = options.contains(WRITE) || options.contains(APPEND) - val mode = new StringBuilder("r") if (writing) mode.append("w") @@ -127,13 +130,8 @@ object FileChannel { val raf = tryRandomAccessFile(path.toString, mode.toString) try { - if (writing && options.contains(TRUNCATE_EXISTING)) { + if (writing && options.contains(TRUNCATE_EXISTING)) raf.setLength(0L) - } - - if (writing && options.contains(APPEND)) { - raf.seek(raf.length()) - } new FileChannelImpl( raf.getFD(), @@ -141,7 +139,8 @@ object FileChannel { deleteFileOnClose = options.contains(StandardOpenOption.DELETE_ON_CLOSE), openForReading = true, - openForWriting = writing + openForWriting = writing, + openForAppending = appending ) } catch { case e: Throwable => diff --git a/javalib/src/main/scala/java/nio/channels/FileChannelImpl.scala b/javalib/src/main/scala/java/nio/channels/FileChannelImpl.scala index 2ee2fa68af..d1087b3d0e 100644 --- a/javalib/src/main/scala/java/nio/channels/FileChannelImpl.scala +++ b/javalib/src/main/scala/java/nio/channels/FileChannelImpl.scala @@ -1,26 +1,28 @@ package java.nio.channels -import java.nio.file.Files - import java.nio.{ByteBuffer, MappedByteBuffer, MappedByteBufferImpl} +import java.nio.channels.FileChannel.MapMode +import java.nio.file.Files import java.nio.file.WindowsException + import scala.scalanative.nio.fs.unix.UnixException import java.io.FileDescriptor import java.io.File +import java.util.Objects + import scala.scalanative.meta.LinktimeInfo.isWindows import java.io.IOException import scala.scalanative.posix.fcntl._ import scala.scalanative.posix.fcntlOps._ -import scala.scalanative.libc.stdio +import scala.scalanative.libc.{stdio, string} import scala.scalanative.unsafe._ import scala.scalanative.posix.unistd import scala.scalanative.unsigned._ import scala.scalanative.{runtime, windows} -import scalanative.libc.stdio import scala.scalanative.libc.errno import scala.scalanative.windows.ErrorHandlingApi @@ -36,16 +38,55 @@ private[java] final class FileChannelImpl( file: Option[File], deleteFileOnClose: Boolean, openForReading: Boolean, - openForWriting: Boolean + openForWriting: Boolean, + openForAppending: Boolean = false ) extends FileChannel { - override def force(metadata: Boolean): Unit = - fd.sync() + 
/* Note: + * Channels are described in the Java documentation as thread-safe. + * This implementation is, most patently _not_ thread-safe. + * Use with only one thread accessing the channel, even for READS. + */ + + if (openForAppending) + seekEOF() // so a position() before first APPEND write() matches JVM. - @inline private def assertIfCanLock(): Unit = { + private def ensureOpen(): Unit = if (!isOpen()) throw new ClosedChannelException() - if (!openForWriting) throw new NonWritableChannelException() + + private def ensureOpenForWrite(): Unit = { + ensureOpen() + if (!openForWriting) + throw new NonWritableChannelException() + } + + private def seekEOF(): Unit = { + if (isWindows) { + SetFilePointerEx( + fd.handle, + distanceToMove = 0, + newFilePointer = null, + moveMethod = FILE_END + ) + } else { + val pos = unistd.lseek(fd.fd, 0, stdio.SEEK_END); + if (pos < 0) + throwPosixException("lseek") + } + } + + private def throwPosixException(functionName: String): Unit = { + if (!isWindows) { + val errnoString = fromCString(string.strerror(errno.errno)) + throw new IOException(s"${functionName} failed: ${errnoString}") + } } + override def force(metadata: Boolean): Unit = + fd.sync() + + @inline private def assertIfCanLock(): Unit = + ensureOpenForWrite() + override def tryLock( position: Long, size: Long, @@ -122,14 +163,49 @@ private[java] final class FileChannelImpl( position: Long, size: Long ): MappedByteBuffer = { - if ((mode eq FileChannel.MapMode.READ_ONLY) && !openForReading) + if (!openForReading) throw new NonReadableChannelException - if ((mode eq FileChannel.MapMode.READ_WRITE) && (!openForReading || !openForWriting)) - throw new NonWritableChannelException - MappedByteBufferImpl(mode, position, size.toInt, fd, this) + + // JVM states position is non-negative, hence 0 is allowed. + if (position < 0) + throw new IllegalArgumentException("Negative position") + + /* JVM requires the "size" argument to be a long, but throws + * an exception if that long is greater than Integer.MAX_VALUE. + * toInt() would cause such a large value to rollover to a negative value. + * + * Call to MappedByteBufferImpl() below truncates its third argument + * to an Int, knowing this guard is in place. + * + * Java is playing pretty fast & loose with its Ints & Longs, but that is + * the specification & practice that needs to be followed. + */ + + if ((size < 0) || (size > Integer.MAX_VALUE)) + throw new IllegalArgumentException("Negative size") + + ensureOpen() + + if (mode ne MapMode.READ_ONLY) { + // FileChannel.open() has previously rejected READ + APPEND combination. + if (!openForWriting) + throw new NonWritableChannelException + + // This "lengthen" branch is tested/exercised in MappedByteBufferTest. + // Look in MappedByteBufferTest for tests of this "lengthen" block. + val currentFileSize = this.size() + // Detect Long overflow & throw. Room for improvement here. + val newFileSize = Math.addExact(position, size) + if (newFileSize > currentFileSize) + this.lengthen(newFileSize) + } + + // RE: toInt() truncation safety, see note for "size" arg checking above. + MappedByteBufferImpl(mode, position, size.toInt, fd) } - override def position(offset: Long): FileChannel = { + // change position, even in APPEND mode. Use _carefully_. 
+ private def compelPosition(offset: Long): FileChannel = { if (isWindows) FileApi.SetFilePointerEx( fd.handle, @@ -137,7 +213,18 @@ private[java] final class FileChannelImpl( null, FILE_BEGIN ) - else unistd.lseek(fd.fd, offset, stdio.SEEK_SET) + else { + val pos = unistd.lseek(fd.fd, offset, stdio.SEEK_SET) + if (pos < 0) + throwPosixException("lseek") + } + + this + } + + override def position(offset: Long): FileChannel = { + if (!openForAppending) + compelPosition(offset) this } @@ -152,7 +239,10 @@ private[java] final class FileChannelImpl( ) !filePointer } else { - unistd.lseek(fd.fd, 0, stdio.SEEK_CUR).toLong + val pos = unistd.lseek(fd.fd, 0, stdio.SEEK_CUR).toLong + if (pos < 0) + throwPosixException("lseek") + pos } override def read( @@ -244,8 +334,7 @@ private[java] final class FileChannelImpl( } else { val readCount = unistd.read(fd.fd, buf, count.toUInt) if (readCount == 0) { - // end of file - -1 + -1 // end of file } else if (readCount < 0) { // negative value (typically -1) indicates that read failed throw UnixException(file.fold("")(_.toString), errno.errno) @@ -262,8 +351,9 @@ private[java] final class FileChannelImpl( if (GetFileSizeEx(fd.handle, size)) (!size).toLong else 0L } else { + val curPosition = unistd.lseek(fd.fd, 0L, stdio.SEEK_CUR) val size = unistd.lseek(fd.fd, 0L, stdio.SEEK_END); - unistd.lseek(fd.fd, 0L, stdio.SEEK_CUR) + unistd.lseek(fd.fd, curPosition, stdio.SEEK_SET) size } } @@ -292,134 +382,302 @@ private[java] final class FileChannelImpl( nb } - override def truncate(size: Long): FileChannel = - if (!openForWriting) { - throw new IOException("Invalid argument") + private def lengthen(newFileSize: Long): Unit = { + /* Preconditions: only caller, this.map(), has ensured: + * - newFileSize > currentSize + * - file was opened for writing. + * - "this" channel is open + */ + if (!isWindows) { + val status = unistd.ftruncate(fd.fd, newFileSize) + if (status < 0) + throwPosixException("ftruncate") } else { - ensureOpen() val currentPosition = position() + val hasSucceded = - if (isWindows) { + FileApi.SetFilePointerEx( + fd.handle, + newFileSize, + null, + FILE_BEGIN + ) && + FileApi.SetEndOfFile(fd.handle) + + if (!hasSucceded) + throw new IOException("Failed to lengthen file") + + /* Windows doc states that the content of the bytes between the + * currentPosition and the new end of file is undefined. + * In practice, NTFS will zero those bytes. The next step is redundant + * if one is _sure_ the file system is NTFS. + * + * Write a single byte to just before EOF to convince the + * Windows file systems to actualize and zero the undefined blocks. + */ + write(ByteBuffer.wrap(Array[Byte](0.toByte)), newFileSize - 1) + + position(currentPosition) + } + + /* This next step may not be strictly necessary; it is included for the + * sake of robustness across as yet unseen Operating & File systems. + * The sync can be re-visited and micro-optimized if performance becomes a + * concern. + * + * Most contemporary Operating and File systems will have ensured that + * the changes above are in non-volatile storage by the time execution + * reaches here. + * + * Give those corner cases where this is not so a strong hint that it + * should be. If the data is already non-volatile, this should be as + * fast as a kernel call can be. 
+ */ + force(true) + } + + override def truncate(newSize: Long): FileChannel = { + if (newSize < 0) + throw new IllegalArgumentException("Negative size") + + ensureOpen() + + if (!openForWriting) + throw new NonWritableChannelException() + + val currentPosition = position() + + if (newSize < size()) { + if (isWindows) { + val hasSucceded = FileApi.SetFilePointerEx( fd.handle, - size, + newSize, null, FILE_BEGIN ) && - FileApi.SetEndOfFile(fd.handle) - } else { - unistd.ftruncate(fd.fd, size) == 0 - } - if (!hasSucceded) { - throw new IOException("Failed to truncate file") + FileApi.SetEndOfFile(fd.handle) + if (!hasSucceded) + throw new IOException("Failed to truncate file") + } else { + val err = unistd.ftruncate(fd.fd, newSize) + if (err != 0) + throwPosixException("ftruncate") } - if (currentPosition > size) position(size) - else position(currentPosition) - this - } - override def write( - buffers: Array[ByteBuffer], - offset: Int, - length: Int - ): Long = { - ensureOpen() - var i = 0 - while (i < length) { - write(buffers(offset + i)) - i += 1 } - i - } - override def write(buffer: ByteBuffer, pos: Long): Int = { - ensureOpen() - position(pos) - val srcPos: Int = buffer.position() - val srcLim: Int = buffer.limit() - val lim = math.abs(srcLim - srcPos) - val bytes = if (buffer.hasArray()) { - buffer.array() - } else { - val bytes = new Array[Byte](lim) - buffer.get(bytes) - bytes - } - write(bytes, 0, lim) - buffer.position(srcPos + lim) - lim + if (currentPosition > newSize) + compelPosition(newSize) + + this } - override def write(src: ByteBuffer): Int = - write(src, position()) + private def writeArray( + array: Array[Byte], + offset: Int, + count: Int + ): Int = { + // Precondition: caller has checked arguments. - private def ensureOpen(): Unit = - if (!isOpen()) throw new ClosedChannelException() + val nWritten = + if (count == 0) 0 + else { + // we use the runtime knowledge of the array layout to avoid an + // intermediate buffer, and read straight from the array memory. + val buf = array.at(offset) + + if (isWindows) { + val hasSucceded = + WriteFile(fd.handle, buf, count.toUInt, null, null) + if (!hasSucceded) { + throw WindowsException.onPath( + file.fold("")(_.toString) + ) + } + + count // Windows will fail on partial write, so nWritten == count + } else { + // unix-like may do partial writes, so be robust to them. + val writeCount = unistd.write(fd.fd, buf, count.toUInt) + if (writeCount < 0) { + // negative value (typically -1) indicates that write failed + throw UnixException(file.fold("")(_.toString), errno.errno) + } + + writeCount // may be < requested count + } + } + + nWritten + } + + // since all of java package can call this, be stricter with argument checks. 
private[java] def write( buffer: Array[Byte], offset: Int, count: Int - ): Unit = { - if (buffer == null) { - throw new NullPointerException - } - if (offset < 0 || count < 0 || count > buffer.length - offset) { + ): Int = { + Objects.requireNonNull(buffer, "buffer") + + if ((offset < 0) || (count < 0) || (count > buffer.length - offset)) throw new IndexOutOfBoundsException - } - if (count == 0) { - return - } - // we use the runtime knowledge of the array layout to avoid - // intermediate buffer, and read straight from the array memory - val buf = buffer.at(offset) - if (isWindows) { - val hasSucceded = - WriteFile(fd.handle, buf, count.toUInt, null, null) - if (!hasSucceded) { - throw WindowsException.onPath( - file.fold("")(_.toString) - ) - } - } else { - val writeCount = unistd.write(fd.fd, buf, count.toUInt) + writeArray(buffer, offset, count) + } - if (writeCount < 0) { - // negative value (typically -1) indicates that write failed - throw UnixException(file.fold("")(_.toString), errno.errno) - } + private def writeByteBuffer(src: ByteBuffer): Int = { + // Precondition: caller has ensured that channel is open and open for write + val srcPos = src.position() + val srcLim = src.limit() + val nBytes = srcLim - srcPos // number of bytes in range. + + val (arr, offset) = if (src.hasArray()) { + (src.array(), srcPos) + } else { + val ba = new Array[Byte](nBytes) + src.get(ba, srcPos, nBytes) + (ba, 0) } + + val nWritten = writeArray(arr, offset, nBytes) + + /* Advance the srcPos only by the number of bytes actually written. + * This allows higher level callers to re-try partial writes + * in a 'natural' manner (no buffer futzing required). + */ + src.position(srcPos + nWritten) + + nWritten } - def available(): Int = { - if (isWindows) { - val currentPosition, lastPosition = stackalloc[windows.LargeInteger]() - SetFilePointerEx( - fd.handle, - distanceToMove = 0, - newFilePointer = currentPosition, - moveMethod = FILE_CURRENT - ) - SetFilePointerEx( - fd.handle, - distanceToMove = 0, - newFilePointer = lastPosition, - moveMethod = FILE_END - ) - SetFilePointerEx( - fd.handle, - distanceToMove = !currentPosition, - newFilePointer = null, - moveMethod = FILE_BEGIN - ) + override def write( + srcs: Array[ByteBuffer], + offset: Int, + length: Int + ): Long = { - (!lastPosition - !currentPosition).toInt - } else { - val currentPosition = unistd.lseek(fd.fd, 0, stdio.SEEK_CUR) - val lastPosition = unistd.lseek(fd.fd, 0, stdio.SEEK_END) - unistd.lseek(fd.fd, currentPosition, stdio.SEEK_SET) - (lastPosition - currentPosition).toInt + Objects.requireNonNull(srcs, "srcs") + + if ((offset < 0) || + (offset > srcs.length) || + (length < 0) || + (length > srcs.length - offset)) + throw new IndexOutOfBoundsException + + ensureOpenForWrite() + + var totalWritten = 0 + + var partialWriteSeen = false + var j = 0 + + while ((j < length) && !partialWriteSeen) { + val src = srcs(j) + val srcPos = src.position() + val srcLim = src.limit() + val nExpected = srcLim - srcPos // number of bytes in range. + + val nWritten = writeByteBuffer(src) + + totalWritten += nWritten + if (nWritten < nExpected) + partialWriteSeen = true + + j += 1 } + + totalWritten + } + + /* Write to absolute position, do not change current position. + * + * Understanding "does not change current position" when the channel + * has been opened requires some mind_bending/understanding. + * + * "Current position" when file has been opened for APPEND is + * a logical place, End of File (EOF), not an absolute number. 
+ * When APPEND mode changes the position it reports as "current" to the + * new EOF rather than stashed position, according to JVM is is not + * really changing the "current position". + */ + override def write(src: ByteBuffer, pos: Long): Int = { + ensureOpenForWrite() + val stashPosition = position() + compelPosition(pos) + + val nBytesWritten = writeByteBuffer(src) + + if (!openForAppending) + compelPosition(stashPosition) + else + seekEOF() + + nBytesWritten + } + + // Write relative to current position (SEEK_CUR) or, for APPEND, SEEK_END. + override def write(src: ByteBuffer): Int = { + ensureOpenForWrite() + writeByteBuffer(src) + } + + /* The Scala Native implementation of FileInputStream#available delegates + * to this method. This method now implements "available()" as described in + * the Java description of FileInputStream#available. So the delegator + * now matches the its JDK description and behavior (Issue 3333). + * + * There are a couple of fine points to this implemention which might + * be helpful to know: + * 1) There is no requirement that this method itself not block. + * Indeed, depending upon what, if anything, is in the underlying + * file system cache, this method may do so. + * + * The current position should already be in the underlying OS fd but + * calling "size()" may require reading an inode or equivalent. + * + * 2) Given JVM actual behavior, the "read (or skipped over) from this + * input stream without blocking" clause of the JDK description might + * be better read as "without blocking for additional data bytes". + * + * A "skip()" should be a fast update of existing memory. Conceptually, + * and by JDK definition FileChannel "read()"s may block transferring + * bytes from slow storage to memory. Where is io_uring() when + * you need it? + * + * 3) The value returned is exactly the "estimate" portion of the JDK + * description: + * + * - All bets are off is somebody, even this thread, decreases + * size of the file in the interval between when "available()" + * returns and "read()" is called. + * + * - This method is defined in FileChannel#available as returning + * an Int. This also matches the use above in the Windows + * implementation of the private method + * "read(buffer: Array[Byte], offset: Int, count: Int)" + * Trace the count argument logic. + * + * FileChannel defines "position()" and "size()" as Long values. + * For large files and positions < Integer.MAX_VALUE, + * The Long difference "lastPosition - currentPosition" might well + * be greater than Integer.MAX_VALUE. In that case, the .toInt + * truncation will return the low estimate of Integer.MAX_VALUE + * not the true (Long) value. Matches the specification, but gotcha! 
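+ *
+ * (Illustration, not part of the original patch: for a 10-byte file whose
+ *  channel position is 4, available() reports 6; at or past EOF it
+ *  reports 0.)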
+ */ + + // local API extension + private[java] def available(): Int = { + ensureOpen() + + val currentPosition = position() + val lastPosition = size() + + val nAvailable = + if (currentPosition >= lastPosition) 0 + else lastPosition - currentPosition + + nAvailable.toInt } } diff --git a/javalib/src/main/scala/java/nio/file/FileSystemAlreadyExistsException.scala b/javalib/src/main/scala/java/nio/file/FileSystemAlreadyExistsException.scala new file mode 100644 index 0000000000..c6a318c1d1 --- /dev/null +++ b/javalib/src/main/scala/java/nio/file/FileSystemAlreadyExistsException.scala @@ -0,0 +1,8 @@ +package java.nio.file + +class FileSystemAlreadyExistsException(message: String, cause: Throwable) + extends RuntimeException(message, cause) { + def this(message: String) = this(message, null) + def this(cause: Throwable) = this(null, cause) + def this() = this(null, null) +} diff --git a/javalib/src/main/scala/java/nio/file/Files.scala b/javalib/src/main/scala/java/nio/file/Files.scala index fd3567bde8..2e65a32e72 100644 --- a/javalib/src/main/scala/java/nio/file/Files.scala +++ b/javalib/src/main/scala/java/nio/file/Files.scala @@ -898,9 +898,16 @@ object Files { maxDepth: Int, options: Array[FileVisitOption] ): Stream[Path] = { + if (maxDepth < 0) + throw new IllegalArgumentException("'maxDepth' is negative") + val visited = new HashSet[Path]() visited.add(start) - walk(start, maxDepth, 0, options, visited) + + /* To aid debugging, keep maxDepth and currentDepth sensibly related. + * if maxDepth == 0, start currentDepth at zero, else start at 1. + */ + walk(start, maxDepth, Math.min(maxDepth, 1), options, visited) } private def walk( @@ -911,7 +918,7 @@ object Files { visited: Set[Path] // Java Set, gets mutated. Private so no footgun. ): Stream[Path] = { /* Design Note: - * This implementation is an update to Java streams of this historical + * This implementation is an update to Java streams of the historical * Scala stream implementation. It is somewhat inefficient/costly * in that it converts known single names to a singleton Stream * and then relies upon flatmap() to merge streams. Creating a @@ -926,9 +933,10 @@ object Files { * probably the most economic approach, once the problem is described. */ - if (!isDirectory(start, linkOptsFromFileVisitOpts(options))) + if (!isDirectory(start, linkOptsFromFileVisitOpts(options)) || + (maxDepth == 0)) { Stream.of(start) - else { + } else { Stream.concat( Stream.of(start), Arrays @@ -981,9 +989,13 @@ object Files { options: Set[FileVisitOption], maxDepth: Int, visitor: FileVisitor[_ >: Path] - ): Path = + ): Path = { + if (maxDepth < 0) + throw new IllegalArgumentException("'maxDepth' is negative") + try _walkFileTree(start, options, maxDepth, visitor) catch { case TerminateTraversalException => start } + } // The sense of how LinkOption follows links or not is somewhat // inverted because of a double negative. The absense of @@ -1007,7 +1019,11 @@ object Files { val dirsToSkip = new HashSet[Path] val openDirs = scala.collection.mutable.Stack.empty[Path] - val stream = walk(start, maxDepth, 0, optsArray, new HashSet[Path]) + /* To aid debugging, keep maxDepth and currentDepth sensibly related. + * if maxDepth == 0, start currentDepth at zero, else start at 1. 
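+ * (Illustration, not part of the original patch: with maxDepth == 0 only
+ *  `start` itself is visited, matching the JVM behavior of walkFileTree.)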
+ */ + val stream = + walk(start, maxDepth, Math.min(maxDepth, 1), optsArray, new HashSet[Path]) stream.forEach { p => val parent = p.getParent() diff --git a/javalib/src/main/scala/java/nio/file/ProviderNotFoundException.scala b/javalib/src/main/scala/java/nio/file/ProviderNotFoundException.scala new file mode 100644 index 0000000000..e4d27a6fdc --- /dev/null +++ b/javalib/src/main/scala/java/nio/file/ProviderNotFoundException.scala @@ -0,0 +1,8 @@ +package java.nio.file + +class ProviderNotFoundException(message: String, cause: Throwable) + extends RuntimeException(message, cause) { + def this(message: String) = this(message, null) + def this(cause: Throwable) = this(null, cause) + def this() = this(null, null) +} diff --git a/javalib/src/main/scala/java/util/AbstractList.scala b/javalib/src/main/scala/java/util/AbstractList.scala index e256825e66..3c737e7bca 100644 --- a/javalib/src/main/scala/java/util/AbstractList.scala +++ b/javalib/src/main/scala/java/util/AbstractList.scala @@ -148,9 +148,12 @@ abstract class AbstractList[E] protected () } protected def removeRange(fromIndex: Int, toIndex: Int): Unit = { - var i = 0 + // JVM documents fromIndex as inclusive, toIndex as exclusive. + // Someday the arguments should be bounds checked. + var i = fromIndex val iter = listIterator(fromIndex) - while (iter.hasNext() && i <= toIndex) { + while (iter.hasNext() && (i < toIndex)) { + iter.next() iter.remove() i += 1 } diff --git a/javalib/src/main/scala/java/util/ArrayList.scala b/javalib/src/main/scala/java/util/ArrayList.scala index 6f920c6d16..60ba9c6a9c 100644 --- a/javalib/src/main/scala/java/util/ArrayList.scala +++ b/javalib/src/main/scala/java/util/ArrayList.scala @@ -197,12 +197,14 @@ class ArrayList[E] private ( * collections. */ + // Flaw: This method makes no attempt to detect ConcurrentModification. + new Spliterators.AbstractSpliterator[E]( _size, - Spliterator.SIZED | Spliterator.SUBSIZED + Spliterator.SIZED | Spliterator.SUBSIZED | Spliterator.ORDERED ) { private var cursor = 0 - private val limit = _size + private lazy val limit = _size // late binding def tryAdvance(action: Consumer[_ >: E]): Boolean = { if (cursor >= limit) false diff --git a/javalib/src/main/scala/java/util/Arrays.scala b/javalib/src/main/scala/java/util/Arrays.scala index d878b3495f..1dc8dad02d 100644 --- a/javalib/src/main/scala/java/util/Arrays.scala +++ b/javalib/src/main/scala/java/util/Arrays.scala @@ -1,6 +1,11 @@ // Ported from Scala.js commit: ba618ed dated: 2020-10-05 -// Arrays.spliterator() methods added for Scala Native. -// Arrays.stream() methods added for Scala Native. + +/* + Arrays.spliterator() methods added for Scala Native. + Arrays.stream() methods added for Scala Native. + Arrays.setAll*() methods added for Scala Native. + Arrays.parallel*() methods added for Scala Native. + */ package java.util @@ -8,6 +13,7 @@ import scala.annotation.tailrec import scala.reflect.ClassTag +import java.util.function._ import java.{util => ju} import java.util.stream.StreamSupport @@ -1002,7 +1008,259 @@ object Arrays { } // Scala Native additions -------------------------------------------------- - import java.util.{Spliterator, Spliterators} + + /* Note: + * For now all of parallelPrefix(), parallelSetAll() and parallelSort() + * methods are restricted to a parallelism of 1, i.e. sequential. + * + * Later evolutions could/should increase the parallelism when + * multithreading has been enabled. 
+ */ + + def parallelPrefix(array: Array[Double], op: DoubleBinaryOperator): Unit = { + parallelPrefix(array, 0, array.length, op) + } + + def parallelPrefix( + array: Array[Double], + fromIndex: Int, + toIndex: Int, + op: DoubleBinaryOperator + ): Unit = { + checkRangeIndices(array, fromIndex, toIndex) + val rangeSize = toIndex - fromIndex + + if (rangeSize >= 2) { // rangeSize == 0 or 1 leaves array unmodified. + for (j <- (fromIndex + 1) until toIndex) { + array(j) = op.applyAsDouble(array(j - 1), array(j)) + } + } + } + + def parallelPrefix(array: Array[Int], op: IntBinaryOperator): Unit = { + parallelPrefix(array, 0, array.length, op) + } + + def parallelPrefix( + array: Array[Int], + fromIndex: Int, + toIndex: Int, + op: IntBinaryOperator + ): Unit = { + checkRangeIndices(array, fromIndex, toIndex) + val rangeSize = toIndex - fromIndex + + if (rangeSize >= 2) { // rangeSize == 0 or 1 leaves array unmodified. + for (j <- (fromIndex + 1) until toIndex) { + array(j) = op.applyAsInt(array(j - 1), array(j)) + } + } + } + + def parallelPrefix(array: Array[Long], op: LongBinaryOperator): Unit = { + parallelPrefix(array, 0, array.length, op) + } + + def parallelPrefix( + array: Array[Long], + fromIndex: Int, + toIndex: Int, + op: LongBinaryOperator + ): Unit = { + checkRangeIndices(array, fromIndex, toIndex) + val rangeSize = toIndex - fromIndex + + if (rangeSize >= 2) { // rangeSize == 0 or 1 leaves array unmodified. + for (j <- (fromIndex + 1) until toIndex) { + array(j) = op.applyAsLong(array(j - 1), array(j)) + } + } + } + + def parallelPrefix[T <: AnyRef]( + array: Array[T], + op: BinaryOperator[T] + ): Unit = { + parallelPrefix[T](array, 0, array.length, op) + } + + def parallelPrefix[T <: AnyRef]( + array: Array[T], + fromIndex: Int, + toIndex: Int, + op: BinaryOperator[T] + ): Unit = { + checkRangeIndices(array, fromIndex, toIndex) + val rangeSize = toIndex - fromIndex + + if (rangeSize >= 2) { // rangeSize == 0 or 1 leaves array unmodified. 
+ for (j <- (fromIndex + 1) until toIndex) { + array(j) = op.apply(array(j - 1), array(j)) + } + } + } + + def parallelSetAll( + array: Array[Double], + generator: IntToDoubleFunction + ): Unit = { + setAll(array, generator) + } + + def parallelSetAll(array: Array[Int], generator: IntUnaryOperator): Unit = { + setAll(array, generator) + } + + def parallelSetAll(array: Array[Long], generator: IntToLongFunction): Unit = { + setAll(array, generator) + } + + def parallelSetAll[T <: AnyRef]( + array: Array[T], + generator: IntFunction[_ <: T] + ): Unit = { + setAll(array, generator) + } + +// parallelSort(byte[]) + def parallelSort(a: Array[Byte]): Unit = + sort(a) + +// parallelSort(byte[] a, int fromIndex, int toIndex) + def parallelSort( + a: Array[Byte], + fromIndex: Int, + toIndex: Int + ): Unit = + sort(a, fromIndex, toIndex) + +// parallelSort(char[]) + def parallelSort(a: Array[Char]): Unit = + sort(a) + +// parallelSort(char[] a, int fromIndex, int toIndex) + def parallelSort( + a: Array[Char], + fromIndex: Int, + toIndex: Int + ): Unit = + sort(a, fromIndex, toIndex) + +// parallelSort(double[]) + def parallelSort(array: Array[Double]): Unit = + sort(array) + +// parallelSort(double[] a, int fromIndex, int toIndex) + def parallelSort( + array: Array[Double], + fromIndex: Int, + toIndex: Int + ): Unit = + sort(array, fromIndex, toIndex) + +// parallelSort(float[]) + def parallelSort(a: Array[Float]): Unit = + sort(a) + +// parallelSort(float[] a, int fromIndex, int toIndex) + def parallelSort( + a: Array[Float], + fromIndex: Int, + toIndex: Int + ): Unit = + sort(a, fromIndex, toIndex) + +// parallelSort(int[]) + def parallelSort(a: Array[Int]): Unit = + sort(a) + +// parallelSort(int[] a, int fromIndex, int toIndex) + def parallelSort(a: Array[Int], fromIndex: Int, toIndex: Int): Unit = + sort(a, fromIndex, toIndex) + +// parallelSort(long[]) + def parallelSort(a: Array[Long]): Unit = + sort(a) +// parallelSort(long[] a, int fromIndex, int toIndex) + def parallelSort( + a: Array[Long], + fromIndex: Int, + toIndex: Int + ): Unit = + sort(a, fromIndex, toIndex) + +// parallelSort(short[]) + def parallelSort(a: Array[Short]): Unit = + sort(a) + +// parallelSort(short[] a, int fromIndex, int toIndex) + def parallelSort( + a: Array[Short], + fromIndex: Int, + toIndex: Int + ): Unit = + sort(a, fromIndex, toIndex) + +// parallelSort(T[]) +// def parallelSort(a: Array[AnyRef]): Unit = +// sort(a) + +// def parallelSort[T <: Comparable[AnyRef]]( + def parallelSort[T <: Comparable[_ <: AnyRef]]( + array: Array[T] + ): Unit = { + sort(array.asInstanceOf[Array[AnyRef]]) + } + +// parallelSort(T[] a, Comparator cmp) + def parallelSort[T <: AnyRef]( + array: Array[T], + comparator: Comparator[_ >: T] + ): Unit = { + sort[T](array, comparator) + } + +// parallelSort(T[] a, int fromIndex, int toIndex) + def parallelSort[T <: Comparable[_ <: AnyRef]]( + array: Array[T], + fromIndex: Int, + toIndex: Int + ): Unit = + sort(array.asInstanceOf[Array[AnyRef]], fromIndex, toIndex) + +// parallelSort(T[] a, int fromIndex, int toIndex, Comparator cmp) + + def parallelSort[T <: AnyRef]( + array: Array[T], + fromIndex: Int, + toIndex: Int, + comparator: Comparator[_ >: T] + ): Unit = { + sort[T](array, fromIndex, toIndex, comparator) + } + + def setAll(array: Array[Double], generator: IntToDoubleFunction): Unit = { + for (j <- 0 until array.size) + array(j) = generator.applyAsDouble(j) + } + + def setAll(array: Array[Int], generator: IntUnaryOperator): Unit = { + for (j <- 0 until array.size) + array(j) = 
generator.applyAsInt(j) + } + + def setAll(array: Array[Long], generator: IntToLongFunction): Unit = { + for (j <- 0 until array.size) + array(j) = generator.applyAsLong(j) + } + + def setAll[T <: AnyRef]( + array: Array[T], + generator: IntFunction[_ <: T] + ): Unit = { + for (j <- 0 until array.size) + array(j) = generator.apply(j) + } private final val standardArraySpliteratorCharacteristics = Spliterator.SIZED | @@ -1182,4 +1440,5 @@ object Arrays { StreamSupport.stream(spliter, parallel = false) } + } diff --git a/javalib/src/main/scala/java/util/EnumSet.scala b/javalib/src/main/scala/java/util/EnumSet.scala index 3227bd37d6..2dc5b00390 100644 --- a/javalib/src/main/scala/java/util/EnumSet.scala +++ b/javalib/src/main/scala/java/util/EnumSet.scala @@ -2,27 +2,113 @@ package java.util import java.lang.Enum -final class EnumSet[E <: Enum[E]] private (values: Array[E]) +final class EnumSet[E <: Enum[E]] private (values: Set[E]) extends AbstractSet[E] with Cloneable with Serializable { - def iterator(): Iterator[E] = - new Iterator[E] { - private var i = 0 - override def hasNext(): Boolean = i < values.length - override def next(): E = { - val r = values(i) - i += 1 - r - } - override def remove(): Unit = throw new UnsupportedOperationException() - } - def size(): Int = values.length + // Unsupported requires reflection + // def this(elementType: Class[E], universe: Array[Enum[E]]) = ??? + + override def iterator(): Iterator[E] = values.iterator() + override def size(): Int = values.size() + override def isEmpty(): Boolean = values.isEmpty() + override def contains(o: Any): Boolean = values.contains(o) + override def toArray(): Array[AnyRef] = values.toArray() + override def toArray[T <: AnyRef](a: Array[T]): Array[T] = values.toArray(a) + override def add(e: E): Boolean = values.add(e) + override def remove(o: Any): Boolean = values.remove(o) + override def containsAll(c: Collection[_]): Boolean = values.containsAll(c) + override def addAll(c: Collection[_ <: E]): Boolean = values.addAll(c) + override def removeAll(c: Collection[_]): Boolean = values.removeAll(c) + override def retainAll(c: Collection[_]): Boolean = values.retainAll(c) + override def clear(): Unit = values.clear() + override def equals(o: Any): Boolean = values.equals(o) + override def hashCode(): Int = values.hashCode() + + override protected[util] def clone(): EnumSet[E] = + super.clone().asInstanceOf[EnumSet[E]] } object EnumSet { - def noneOf[E <: Enum[E]: scala.reflect.ClassTag]( - elementType: Class[E] - ): EnumSet[E] = - new EnumSet[E](Array.empty[E]) + def noneOf[E <: Enum[E]](elementType: Class[E]): EnumSet[E] = + new EnumSet[E](new HashSet[E]()) + + // Unsupported, requires reflection + // def allOf[E <: Enum[E]](elementType: Class[E]): EnumSet[E] = ??? 
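
With the rewrite above, `EnumSet` is backed by a plain `HashSet` and `noneOf` no longer needs a `ClassTag`. A sketch, assuming a hypothetical Java enum `Color { RED, GREEN, BLUE }` on the classpath:

```scala
import java.util.EnumSet

object EnumSetSketch {
  def main(args: Array[String]): Unit = {
    // Color is a hypothetical java.lang.Enum subclass, used for illustration only.
    val colors = EnumSet.noneOf(classOf[Color]) // starts empty
    colors.add(Color.RED)
    colors.add(Color.GREEN)

    println(colors.contains(Color.RED)) // true
    println(colors.size())              // 2
  }
}
```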
+ + def copyOf[E <: Enum[E]](s: EnumSet[E]): EnumSet[E] = + s.clone().asInstanceOf[EnumSet[E]] + + def copyOf[E <: Enum[E]](c: Collection[E]): EnumSet[E] = c match { + case c: EnumSet[E] => copyOf(c) + case c => + if (c.isEmpty()) throw new IllegalArgumentException("Collection is empty") + val i = c.iterator() + val set = EnumSet.of(i.next()) + while (i.hasNext()) { + set.add(i.next()) + } + set + } + + // Unsupported, requires reflection + // def complementOf[E <: Enum[E]](s: EnumSet[E]): EnumSet[E] = { + // val result = copyOf(s) + // result.complement() + // result + // } + + def of[E <: Enum[E]](e: E): EnumSet[E] = { + val s = emptySetOf(e) + s.add(e) + s + } + + def of[E <: Enum[E]](e1: E, e2: E): EnumSet[E] = { + val s = emptySetOf(e1) + s.add(e1) + s.add(e2) + s + } + + def of[E <: Enum[E]](e1: E, e2: E, e3: E): EnumSet[E] = { + val s = emptySetOf(e1) + s.add(e1) + s.add(e2) + s.add(e3) + s + } + + def of[E <: Enum[E]](e1: E, e2: E, e3: E, e4: E): EnumSet[E] = { + val s = emptySetOf(e1) + s.add(e1) + s.add(e2) + s.add(e3) + s.add(e4) + s + } + + def of[E <: Enum[E]](e1: E, e2: E, e3: E, e4: E, e5: E): EnumSet[E] = { + val s = emptySetOf(e1) + s.add(e1) + s.add(e2) + s.add(e3) + s.add(e4) + s.add(e5) + s + } + + def of[E <: Enum[E]](first: E, rest: Array[E]): EnumSet[E] = { + val s = emptySetOf(first) + s.add(first) + rest.foreach(s.add) + s + } + + // Unsupported, requires reflection + // def range[E <: Enum[E]](from: E, to: E): EnumSet[E] = ??? + + @inline + private def emptySetOf[E <: Enum[E]](e: E): EnumSet[E] = + new EnumSet[E](new HashSet[E]()) } diff --git a/javalib/src/main/scala/java/util/LinkedList.scala b/javalib/src/main/scala/java/util/LinkedList.scala index 1086f30437..6214261359 100644 --- a/javalib/src/main/scala/java/util/LinkedList.scala +++ b/javalib/src/main/scala/java/util/LinkedList.scala @@ -387,6 +387,15 @@ class LinkedList[E]() override def clone(): AnyRef = new LinkedList[E](this) + + override def spliterator(): Spliterator[E] = { + // Report the ORDERED characteristic, same as the JVM. 
+ Spliterators.spliterator[E]( + this, + Spliterator.SIZED | Spliterator.SUBSIZED | Spliterator.ORDERED + ) + } + } object LinkedList { diff --git a/javalib/src/main/scala/java/util/PrimitiveIterator.scala b/javalib/src/main/scala/java/util/PrimitiveIterator.scala index b1aff34597..9164f3ea6a 100644 --- a/javalib/src/main/scala/java/util/PrimitiveIterator.scala +++ b/javalib/src/main/scala/java/util/PrimitiveIterator.scala @@ -1,12 +1,14 @@ package java.util +import java.{lang => jl} + import java.util.function._ import Spliterator._ object PrimitiveIterator { - trait OfDouble extends PrimitiveIterator[Double, DoubleConsumer] { - override def forEachRemaining(action: Consumer[_ >: Double]): Unit = { + trait OfDouble extends PrimitiveIterator[jl.Double, DoubleConsumer] { + override def forEachRemaining(action: Consumer[_ >: jl.Double]): Unit = { Objects.requireNonNull(action) if (action.isInstanceOf[DoubleConsumer]) { @@ -38,8 +40,8 @@ object PrimitiveIterator { def nextDouble(): scala.Double // Abstract } - trait OfInt extends PrimitiveIterator[Int, IntConsumer] { - override def forEachRemaining(action: Consumer[_ >: Int]): Unit = { + trait OfInt extends PrimitiveIterator[jl.Integer, IntConsumer] { + override def forEachRemaining(action: Consumer[_ >: jl.Integer]): Unit = { Objects.requireNonNull(action) if (action.isInstanceOf[IntConsumer]) { @@ -64,8 +66,8 @@ object PrimitiveIterator { def nextInt(): Int // Abstract } - trait OfLong extends PrimitiveIterator[Long, LongConsumer] { - override def forEachRemaining(action: Consumer[_ >: Long]): Unit = { + trait OfLong extends PrimitiveIterator[jl.Long, LongConsumer] { + override def forEachRemaining(action: Consumer[_ >: jl.Long]): Unit = { Objects.requireNonNull(action) if (action.isInstanceOf[LongConsumer]) { forEachRemaining(action.asInstanceOf[LongConsumer]) diff --git a/javalib/src/main/scala/java/util/Random.scala b/javalib/src/main/scala/java/util/Random.scala index dd05be1ef0..04f1f0faaf 100644 --- a/javalib/src/main/scala/java/util/Random.scala +++ b/javalib/src/main/scala/java/util/Random.scala @@ -1,5 +1,9 @@ package java.util +import java.{lang => jl} +import java.util.function.DoubleConsumer +import java.util.stream.{StreamSupport, DoubleStream} + import scala.annotation.tailrec /** Ported from Apache Harmony and described by Donald E. Knuth in The Art of @@ -110,4 +114,121 @@ class Random(seed_in: Long) extends AnyRef with java.io.Serializable { // And return x*c x * c } + + private val invalidStreamSizeMsg = "size must be non-negative" + + // The elements of the stream are random, not the Characteristics themselves. + final val randomStreamCharacteristics = + Spliterator.SIZED | Spliterator.SUBSIZED | + Spliterator.NONNULL | Spliterator.IMMUTABLE // 0x4540, decimal 17728 + + // Algorithm from JDK 17 Random Class documentation. + private def nextDouble(origin: Double, bound: Double): Double = { + val r = nextDouble() * (bound - origin) + origin + + if (r >= bound) Math.nextDown(bound) // correct for rounding + else r + } + + /* The same algorithm is used in the three Random*Spliterator methods, + * specialized by type. This algorithm is heavily influenced by the + * public domain JSR-166 code in + * java.util.concurrent.ThreadLocalRandom.scala and bears a debt of + * gratitude to Doug Lea & Co. 
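
The private `nextDouble(origin, bound)` helper above keeps the bound exclusive by clamping the rare case where scaling rounds up onto the bound. The clamp itself is easy to see in isolation (a sketch of the same guard, not the class's public API):

```scala
object HalfOpenClampSketch {
  // Mirrors the guard used above: a value that lands on the bound is pulled
  // back to the largest double strictly below it.
  def clampBelow(r: Double, bound: Double): Double =
    if (r >= bound) Math.nextDown(bound) else r

  def main(args: Array[String]): Unit = {
    val bound = 10.0
    println(clampBelow(9.5, bound))          // 9.5, untouched
    println(clampBelow(10.0, bound))         // just below 10.0
    println(clampBelow(10.0, bound) < bound) // true: the bound stays exclusive
  }
}
```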
+ */ + + final private class RandomDoublesSpliterator( + var index: Long, + fence: Long, + origin: Double, + bound: Double + ) extends Spliterator.OfDouble { + + override def trySplit(): RandomDoublesSpliterator = { + val m = (index + fence) >>> 1 + if (m <= index) null + else { + val i = index + index = m + new RandomDoublesSpliterator(i, m, origin, bound) + } + } + + override def estimateSize(): Long = fence - index + override def characteristics(): Int = randomStreamCharacteristics + + override def tryAdvance(consumer: DoubleConsumer): Boolean = { + if (consumer == null) + throw new NullPointerException + + if (index >= fence) false + else { + consumer.accept(nextDouble(origin, bound)) + index += 1 + true + } + } + + override def forEachRemaining(consumer: DoubleConsumer): Unit = { + if (consumer == null) + throw new NullPointerException + + if (index < fence) { + var i = index + index = fence + while ({ + consumer.accept(nextDouble(origin, bound)) + i += 1 + i < fence + }) () + } + } + } + + def doubles(): DoubleStream = { + val spliter = + new RandomDoublesSpliterator(0L, jl.Long.MAX_VALUE, 0.0, 1.0) + + StreamSupport.doubleStream(spliter, parallel = false) + } + + def doubles( + randomNumberOrigin: Double, + randomNumberBound: Double + ): DoubleStream = { + doubles(jl.Long.MAX_VALUE, randomNumberOrigin, randomNumberBound) + } + + def doubles(streamSize: Long): DoubleStream = { + if (streamSize < 0L) + throw new IllegalArgumentException(invalidStreamSizeMsg) + + val spliter = + new RandomDoublesSpliterator(0L, streamSize, 0.0, 1.0) + + StreamSupport.doubleStream(spliter, parallel = false) + } + + def doubles( + streamSize: Long, + randomNumberOrigin: Double, + randomNumberBound: Double + ): DoubleStream = { + if (streamSize < 0L) + throw new IllegalArgumentException(invalidStreamSizeMsg) + + if (!(randomNumberOrigin < randomNumberBound)) + throw new IllegalArgumentException("bound must be greater than origin") + + val spliter = + new RandomDoublesSpliterator( + 0L, + streamSize, + randomNumberOrigin, + randomNumberBound + ) + + StreamSupport.doubleStream(spliter, parallel = false) + } + } diff --git a/javalib/src/main/scala/java/util/StringJoiner.scala b/javalib/src/main/scala/java/util/StringJoiner.scala new file mode 100644 index 0000000000..6a417140c6 --- /dev/null +++ b/javalib/src/main/scala/java/util/StringJoiner.scala @@ -0,0 +1,91 @@ +// Ported from Scala.js commit: 57d71da dated: 2023-05-31 +// Extensively re-written for Scala Native. + +package java.util + +final class StringJoiner private ( + delimiter: String, + prefixLength: Integer, + suffix: String +) extends AnyRef { + + def this(delimiter: CharSequence) = this(delimiter.toString(), 0, "") + + def this( + delimiter: CharSequence, + prefix: CharSequence, + suffix: CharSequence + ) = { + this(delimiter.toString(), prefix.length(), suffix.toString()) + if (prefixLength > 0) + builder.append(prefix) + } + + private val delimLength = delimiter.length() + + /* Avoid early builder enlargeBuffer() calls. + * Add an arbitrary guestimate > default 16 excess capacity. + */ + private val builder = + new java.lang.StringBuilder(prefixLength + 40 + suffix.length()) + + /* The custom value to return if empty, set by `setEmptyValue` (nullable). + */ + private var emptyValue: String = null + + /* "true" before the first add(), even of "", or merge() of non-empty + * StringJoiner. See JDK StringJoiner documentation. 
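
The `isEmpty` flag described above is what lets `setEmptyValue` take effect only while nothing has been added, even though adding an empty string leaves `builder.length()` unchanged. Expected behaviour, matching the JDK:

```scala
import java.util.StringJoiner

object StringJoinerEmptySketch {
  def main(args: Array[String]): Unit = {
    val sj = new StringJoiner(", ", "[", "]").setEmptyValue("(none)")

    println(sj.toString()) // (none)  -- nothing added yet
    println(sj.length())   // 6       -- length of the empty value

    sj.add("")             // adding even "" ends the "empty" state
    println(sj.toString()) // []      -- prefix + "" + suffix

    sj.add("a").add("b")
    println(sj.toString()) // [, a, b]
  }
}
```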
+ * + * A tricky bit: + * Adding an initial empty string ("") will set isEmpty to "false" but + * will not change builder.length(). Use former to determine when to + * use emptyValue or not. + */ + private var isEmpty = true + + private def appendStemTo(other: StringJoiner) = { + if (!isEmpty) // builder contains more than prefix, possibly only "". + other.add(this.builder.substring(prefixLength)) + } + + def setEmptyValue(emptyValue: CharSequence): StringJoiner = { + this.emptyValue = emptyValue.toString() + this + } + + override def toString(): String = { + if (isEmpty && (emptyValue != null)) emptyValue + else { + if (suffix.length == 0) + builder.toString() + else { // avoid an extra String allocation. + val len = builder.length() + builder.append(suffix) + val s = builder.toString() + builder.setLength(len) + s + } + } + } + + def add(newElement: CharSequence): StringJoiner = { + if (isEmpty) + isEmpty = false + else if (delimLength > 0) + builder.append(delimiter) + + builder.append(if (newElement == null) "null" else newElement) + this + } + + def merge(other: StringJoiner): StringJoiner = { + other.appendStemTo(this) + this + } + + def length(): Int = { + if (isEmpty && (emptyValue != null)) emptyValue.length() + else builder.length() + suffix.length() + } + +} diff --git a/javalib/src/main/scala/java/util/UUID.scala b/javalib/src/main/scala/java/util/UUID.scala index 4a3ced82ca..baab91b44c 100644 --- a/javalib/src/main/scala/java/util/UUID.scala +++ b/javalib/src/main/scala/java/util/UUID.scala @@ -1,15 +1,14 @@ +// Ported from Scala.js commit: e20d6d6 dated: 2023-07-19 + package java.util -import java.lang.{Long => JLong} import java.security.SecureRandom final class UUID private ( private val i1: Int, private val i2: Int, private val i3: Int, - private val i4: Int, - private[this] var l1: JLong, - private[this] var l2: JLong + private val i4: Int ) extends AnyRef with java.io.Serializable with Comparable[UUID] { @@ -35,23 +34,17 @@ final class UUID private ( (mostSigBits >>> 32).toInt, mostSigBits.toInt, (leastSigBits >>> 32).toInt, - leastSigBits.toInt, - mostSigBits, - leastSigBits + leastSigBits.toInt ) } - def getLeastSignificantBits(): Long = { - if (l2 eq null) - l2 = (i3.toLong << 32) | (i4.toLong & 0xffffffffL) - l2.longValue() - } + @inline + def getLeastSignificantBits(): Long = + (i3.toLong << 32) | (i4.toLong & 0xffffffffL) - def getMostSignificantBits(): Long = { - if (l1 eq null) - l1 = (i1.toLong << 32) | (i2.toLong & 0xffffffffL) - l1.longValue() - } + @inline + def getMostSignificantBits(): Long = + (i1.toLong << 32) | (i2.toLong & 0xffffffffL) def version(): Int = (i2 & 0xf000) >> 12 @@ -115,16 +108,20 @@ final class UUID private ( } def compareTo(that: UUID): Int = { - if (this.i1 != that.i1) { - if (this.i1 > that.i1) 1 else -1 - } else if (this.i2 != that.i2) { - if (this.i2 > that.i2) 1 else -1 - } else if (this.i3 != that.i3) { - if (this.i3 > that.i3) 1 else -1 - } else if (this.i4 != that.i4) { - if (this.i4 > that.i4) 1 else -1 + val thisHi = this.getMostSignificantBits() + val thatHi = that.getMostSignificantBits() + if (thisHi != thatHi) { + if (thisHi < thatHi) -1 + else 1 } else { - 0 + val thisLo = this.getLeastSignificantBits() + val thatLo = that.getLeastSignificantBits() + if (thisLo != thatLo) { + if (thisLo < thatLo) -1 + else 1 + } else { + 0 + } } } } @@ -187,7 +184,7 @@ object UUID { val i2 = parseHex8(name.substring(9, 13), name.substring(14, 18)) val i3 = parseHex8(name.substring(19, 23), name.substring(24, 28)) val i4 = 
parseHex8(name.substring(28, 32), name.substring(32, 36)) - new UUID(i1, i2, i3, i4, null, null) + new UUID(i1, i2, i3, i4) } catch { case _: NumberFormatException => fail() } diff --git a/javalib/src/main/scala/java/util/Vector.scala b/javalib/src/main/scala/java/util/Vector.scala new file mode 100644 index 0000000000..75d4e9c709 --- /dev/null +++ b/javalib/src/main/scala/java/util/Vector.scala @@ -0,0 +1,453 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package java.util + +import java.io._ +import java.util._ + +@SerialVersionUID(-2767605614048989439L) +object Vector { + private val DEFAULT_SIZE = 10 +} + +@SerialVersionUID(-2767605614048989439L) +class Vector[E <: AnyRef]( + initialCapacity: Int, + protected var capacityIncrement: Int +) extends AbstractList[E] + with List[E] + with RandomAccess + with Cloneable + with Serializable { + if (initialCapacity < 0) throw new IllegalArgumentException + + protected var elementCount = 0 + protected var elementData: Array[E] = newElementArray(initialCapacity) + + def this() = this(Vector.DEFAULT_SIZE, 0) + + def this(capacity: Int) = this(capacity, 0) + + def this(collection: Collection[_ <: E]) = { + this(collection.size(), 0) + val it = collection.iterator() + while (it.hasNext()) { + elementData(elementCount) = it.next() + elementCount += 1 + } + } + + private def newElementArray(size: Int): Array[E] = + new Array[AnyRef](size).asInstanceOf[Array[E]] + + override def add(location: Int, obj: E): Unit = + insertElementAt(obj, location) + + override def add(obj: E): Boolean = synchronized { + if (elementCount == elementData.length) growByOne() + elementData(elementCount) = obj + elementCount += 1 + true + } + + override def addAll( + _location: Int, + collection: Collection[_ <: E] + ): Boolean = synchronized { + var location = _location + if (0 <= location && location <= elementCount) { + val size = collection.size() + if (size == 0) return false + val required = size - (elementData.length - elementCount) + if (required > 0) growBy(required) + val count = elementCount - location + if (count > 0) + System.arraycopy( + elementData, + location, + elementData, + location + size, + count + ) + val it = collection.iterator() + while (it.hasNext()) { + elementData(location) = it.next() + location += 1 + } + elementCount += size + return true + } + throw new ArrayIndexOutOfBoundsException(location) + } + + override def addAll(collection: Collection[_ <: E]): Boolean = synchronized { + addAll(elementCount, collection) + } + + def addElement(obj: E): Unit = synchronized { + if (elementCount == elementData.length) growByOne() + elementData(elementCount) = obj + elementCount += 1 + } + + def capacity: Int = synchronized { elementData.length } + + override def clear(): Unit = removeAllElements() + + 
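
The `Vector` port above keeps the legacy synchronized API alongside the `java.util.List` interface. A brief, non-exhaustive usage sketch:

```scala
import java.util.Vector

object VectorSketch {
  def main(args: Array[String]): Unit = {
    // Initial capacity 10, then grow by 5 slots at a time once full.
    val v = new Vector[String](10, 5)
    v.addElement("a") // legacy API
    v.add("b")        // java.util.List API

    println(v.elementAt(0)) // a
    println(v.size())       // 2

    // Legacy Enumeration-based traversal.
    val e = v.elements
    while (e.hasMoreElements()) println(e.nextElement())
  }
}
```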
override def clone: AnyRef = try + synchronized { + val vector = super.clone.asInstanceOf[Vector[E]] + vector.elementData = elementData.clone + vector + } + catch { + case e: CloneNotSupportedException => null + } + + override def contains(obj: Any): Boolean = + indexOf(obj.asInstanceOf[AnyRef], 0) != -1 + + override def containsAll(collection: Collection[_]): Boolean = synchronized { + super.containsAll(collection) + } + + def copyInto(elements: Array[AnyRef]): Unit = synchronized { + System.arraycopy(elementData, 0, elements, 0, elementCount) + } + + def elementAt(location: Int): E = synchronized { + if (location < elementCount) elementData(location).asInstanceOf[E] + else throw new ArrayIndexOutOfBoundsException(location) + } + + def elements: Enumeration[E] = new Enumeration[E]() { + private[util] var pos = 0 + + override def hasMoreElements(): Boolean = pos < elementCount + + override def nextElement(): E = Vector.this.synchronized { + if (pos < elementCount) { + val elem = elementData(pos) + pos += 1 + elem.asInstanceOf[E] + } else throw new NoSuchElementException + } + } + + def ensureCapacity(minimumCapacity: Int): Unit = synchronized { + if (elementData.length < minimumCapacity) { + val next = (if (capacityIncrement <= 0) elementData.length + else capacityIncrement) + elementData.length + grow( + if (minimumCapacity > next) minimumCapacity + else next + ) + } + } + + override def equals(obj: Any): Boolean = obj match { + case obj: List[_] => + if (this eq obj) return true + synchronized { + val list = obj.asInstanceOf[List[_]] + if (list.size() != elementCount) return false + var index = 0 + val it = list.iterator() + while (it.hasNext()) { + val e1 = elementData({ + index += 1; index - 1 + }) + val e2 = it.next() + if (!(if (e1 == null) e2 == null + else e1 == (e2))) return false + } + } + true + case _ => false + } + + def firstElement: E = synchronized { + if (elementCount > 0) return elementData(0).asInstanceOf[E] + throw new NoSuchElementException + } + + override def get(location: Int): E = elementAt(location) + + private def grow(newCapacity: Int): Unit = { + val newData = newElementArray(newCapacity) + // Assumes elementCount is <= newCapacity + assert(elementCount <= newCapacity) + System.arraycopy(elementData, 0, newData, 0, elementCount) + elementData = newData + } + + private def growByOne(): Unit = { + var adding = 0 + if (capacityIncrement <= 0) { + adding = elementData.length + if (adding == 0) adding = 1 + } else adding = capacityIncrement + assert(adding > 0) + val newData = newElementArray(elementData.length + adding) + System.arraycopy(elementData, 0, newData, 0, elementCount) + elementData = newData + } + + private def growBy(required: Int): Unit = { + var adding = 0 + if (capacityIncrement <= 0) { + adding = elementData.length + if (adding == 0) adding = required + while (adding < required) adding += adding + } else { + adding = (required / capacityIncrement) * capacityIncrement + if (adding < required) adding += capacityIncrement + } + val newData = newElementArray(elementData.length + adding) + System.arraycopy(elementData, 0, newData, 0, elementCount) + elementData = newData + } + + override def hashCode: Int = synchronized { + var result = 1 + for (i <- 0 until elementCount) { + result = (31 * result) + (if (elementData(i) == null) 0 + else elementData(i).hashCode) + } + result + } + + override def indexOf(obj: Any): Int = indexOf(obj, 0) + + def indexOf(obj: Any, location: Int): Int = synchronized { + var i = location + while (i < elementCount) { + if 
(obj == elementData(i)) return i + i += 1 + } + -1 + } + + def insertElementAt(obj: E, location: Int): Unit = synchronized { + if (0 <= location && location <= elementCount) { + if (elementCount == elementData.length) growByOne() + val count = elementCount - location + if (count > 0) + System.arraycopy( + elementData, + location, + elementData, + location + 1, + count + ) + elementData(location) = obj + elementCount += 1 + } else throw new ArrayIndexOutOfBoundsException(location) + } + + override def isEmpty(): Boolean = synchronized { elementCount == 0 } + + def lastElement: E = + try synchronized { elementData(elementCount - 1).asInstanceOf[E] } + catch { + case e: IndexOutOfBoundsException => throw new NoSuchElementException + } + + override def lastIndexOf(obj: Any): Int = synchronized { + lastIndexOf(obj, elementCount - 1) + } + + def lastIndexOf(obj: Any, location: Int): Int = synchronized { + if (location < elementCount) { + var i = location + while (i >= 0) { + if (obj == elementData(i)) return i + i -= 1 + } + -1 + } else throw new ArrayIndexOutOfBoundsException(location) + } + + override def remove(location: Int): E = synchronized { + if (location < elementCount) { + val result = elementData(location).asInstanceOf[E] + elementCount -= 1 + val size = elementCount - location + if (size > 0) + System.arraycopy(elementData, location + 1, elementData, location, size) + elementData(elementCount) = null.asInstanceOf[E] + return result + } + throw new ArrayIndexOutOfBoundsException(location) + } + + override def remove(obj: Any): Boolean = removeElement(obj) + + override def removeAll(collection: Collection[_]): Boolean = synchronized { + super.removeAll(collection) + } + + def removeAllElements(): Unit = synchronized { + for (i <- 0 until elementCount) { + elementData(i) = null.asInstanceOf[E] + } + elementCount = 0 + } + + def removeElement(obj: Any): Boolean = synchronized { + val index = indexOf(obj.asInstanceOf[AnyRef], 0) + if (index == -1) false + else { + removeElementAt(index) + true + } + } + + def removeElementAt(location: Int): Unit = synchronized { + if (0 <= location && location < elementCount) { + elementCount -= 1 + val size = elementCount - location + if (size > 0) + System.arraycopy(elementData, location + 1, elementData, location, size) + elementData(elementCount) = null.asInstanceOf[E] + } else throw new ArrayIndexOutOfBoundsException(location) + } + + override protected def removeRange(start: Int, end: Int): Unit = { + if (start >= 0 && start <= end && end <= elementCount) { + if (start == end) () + else if (end != elementCount) { + System.arraycopy( + elementData, + end, + elementData, + start, + elementCount - end + ) + val newCount = elementCount - (end - start) + Arrays.fill( + elementData.asInstanceOf[Array[AnyRef]], + newCount, + elementCount, + null + ) + elementCount = newCount + } else { + Arrays.fill( + elementData.asInstanceOf[Array[AnyRef]], + start, + elementCount, + null + ) + elementCount = start + } + } else throw new IndexOutOfBoundsException + } + + override def retainAll(collection: Collection[_]): Boolean = synchronized { + super.retainAll(collection) + } + + override def set( + location: Int, + obj: E + ): E = synchronized { + if (location < elementCount) { + val result = elementData(location).asInstanceOf[E] + elementData(location) = obj + return result + } + throw new ArrayIndexOutOfBoundsException(location) + } + + def setElementAt(obj: E, location: Int): Unit = synchronized { + if (location < elementCount) elementData(location) = obj + else 
throw new ArrayIndexOutOfBoundsException(location) + } + + def setSize(length: Int): Unit = synchronized { + if (length == elementCount) return ensureCapacity(length) + if (elementCount > length) + Arrays.fill( + elementData.asInstanceOf[Array[AnyRef]], + length, + elementCount, + null + ) + elementCount = length + } + + override def size(): Int = synchronized(elementCount) + + // TODO: SynchronizedList, SynchronizedRandomAccessList + // override def subList(start: Int, end: Int): List[E] = synchronized { + // new Collections.SynchronizedRandomAccessList[E]( + // super.subList(start, end), + // this + // ) + // } + + override def toArray(): Array[AnyRef] = synchronized { + val result = new Array[AnyRef](elementCount) + System.arraycopy(elementData, 0, result, 0, elementCount) + result + } + + override def toArray[T <: AnyRef](_contents: Array[T]): Array[T] = + synchronized { + val contents = + if (elementCount > _contents.length) + java.lang.reflect.Array + .newInstance(_contents.getClass().getComponentType(), elementCount) + .asInstanceOf[Array[T]] + else _contents + + System.arraycopy(elementData, 0, contents, 0, elementCount) + if (elementCount < contents.length) + contents(elementCount) = null.asInstanceOf[T] + contents + } + + override def toString: String = synchronized { + if (elementCount == 0) return "[]" + val length = elementCount - 1 + val buffer = new java.lang.StringBuilder(elementCount * 16) + buffer.append('[') + for (i <- 0 until length) { + if (elementData(i) eq this) + buffer.append("(this Collection)") + else buffer.append(elementData(i)) + buffer.append(", ") + + } + if (elementData(length) eq this) + buffer.append("(this Collection)") + else buffer.append(elementData(length)) + buffer.append(']') + buffer.toString + } + + def trimToSize(): Unit = synchronized { + if (elementData.length != elementCount) grow(elementCount) + } + + // @throws[IOException] + // private def writeObject(stream: ObjectOutputStream): Unit = { + // stream.defaultWriteObject() + // } +} diff --git a/javalib/src/main/scala/java/util/concurrent/ThreadLocalRandom.scala b/javalib/src/main/scala/java/util/concurrent/ThreadLocalRandom.scala index b4101e3f0f..1c88c5504d 100644 --- a/javalib/src/main/scala/java/util/concurrent/ThreadLocalRandom.scala +++ b/javalib/src/main/scala/java/util/concurrent/ThreadLocalRandom.scala @@ -2,126 +2,641 @@ * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ - * - * and translated to Scala - * Ported from Scala.js commit: bbf0314 dated: Mon, 13 Jun 2022 */ package java.util.concurrent -import java.util.Random -import scala.annotation.tailrec +import java.util._ +import java.util.function._ +import java.util.stream._ +import java.util.concurrent.atomic._ -class ThreadLocalRandom extends Random { +@SerialVersionUID(-5851777807851030925L) +object ThreadLocalRandom { + private def mix64(z0: Long) = { + var z = z0 + z = (z ^ (z >>> 33)) * 0xff51afd7ed558ccdL + z = (z ^ (z >>> 33)) * 0xc4ceb9fe1a85ec53L + z ^ (z >>> 33) + } - private var initialized: Boolean = _ - initialized = true + private def mix32(z0: Long) = { + var z = z0 + z = (z ^ (z >>> 33)) * 0xff51afd7ed558ccdL + (((z ^ (z >>> 33)) * 0xc4ceb9fe1a85ec53L) >>> 32).toInt + } - override def setSeed(seed: Long): Unit = { - if (initialized) - throw new UnsupportedOperationException() + private[concurrent] def localInit(): Unit = { + val p = probeGenerator.addAndGet(PROBE_INCREMENT) + val 
probe = + if (p == 0) 1 + else p // skip 0 + val seed = mix64(seeder.getAndAdd(SEEDER_INCREMENT)) + val t = Thread.currentThread() + t.threadLocalRandomSeed = seed + t.threadLocalRandomProbe = probe + } - super.setSeed(seed) + def current(): ThreadLocalRandom = { + if (Thread.currentThread().threadLocalRandomProbe == 0) + localInit() + instance } - def nextInt(least: Int, bound: Int): Int = { - if (least >= bound) - throw new IllegalArgumentException() + /** Spliterator for int streams. We multiplex the four int versions into one + * class by treating a bound less than origin as unbounded, and also by + * treating "infinite" as equivalent to Long.MAX_VALUE. For splits, it uses + * the standard divide-by-two approach. The long and double versions of this + * class are identical except for types. + */ + final private class RandomIntsSpliterator( + var index: Long, + fence: Long, + origin: Int, + bound: Int + ) extends Spliterator.OfInt { + override def trySplit(): ThreadLocalRandom.RandomIntsSpliterator = { + val i = index + val m = (i + fence) >>> 1 + if (m <= i) null + else { + index = m + new ThreadLocalRandom.RandomIntsSpliterator(i, m, origin, bound) + } + } + + override def estimateSize(): Long = fence - index - val difference = bound - least - if (difference > 0) { - nextInt(difference) + least - } else { - /* The interval size here is greater than Int.MaxValue, - * so the loop will exit with a probability of at least 1/2. - */ - @tailrec - def loop(): Int = { - val n = nextInt() - if (n >= least && n < bound) n - else loop() + override def characteristics(): Int = { + Spliterator.SIZED | + Spliterator.SUBSIZED | + Spliterator.NONNULL | + Spliterator.IMMUTABLE + } + + override def tryAdvance(consumer: IntConsumer): Boolean = { + if (consumer == null) + throw new NullPointerException + + if (index < fence) { + consumer.accept( + ThreadLocalRandom.current().internalNextInt(origin, bound) + ) + index += 1 + return true } + false + } + + override def forEachRemaining(consumer: IntConsumer): Unit = { + if (consumer == null) + throw new NullPointerException - loop() + if (index < fence) { + var i = index + + index = fence + val rng = ThreadLocalRandom.current() + + while ({ + consumer.accept(rng.internalNextInt(origin, bound)) + i += 1 + i < fence + }) () + } } } - def nextLong(_n: Long): Long = { - if (_n <= 0) - throw new IllegalArgumentException("n must be positive") + final private class RandomLongsSpliterator( + var index: Long, + fence: Long, + origin: Long, + bound: Long + ) extends Spliterator.OfLong { - /* - * Divide n by two until small enough for nextInt. On each - * iteration (at most 31 of them but usually much less), - * randomly choose both whether to include high bit in result - * (offset) and whether to continue with the lower vs upper - * half (which makes a difference only if odd). 
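
At the call site, the per-thread seeding above is hidden behind `current()`; a bounded draw and the JVM-matching refusal to reseed look like this:

```scala
import java.util.concurrent.ThreadLocalRandom

object ThreadLocalRandomSketch {
  def main(args: Array[String]): Unit = {
    // current() lazily seeds the calling thread (see localInit above).
    val rng = ThreadLocalRandom.current()

    println(rng.nextInt(1, 7))  // a die roll: origin inclusive, bound exclusive
    println(rng.nextLong(100L)) // in [0, 100)

    // Reseeding is rejected once the instance is initialized, as on the JVM.
    try rng.setSeed(42L)
    catch { case _: UnsupportedOperationException => println("setSeed rejected") }
  }
}
```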
- */ + override def trySplit(): ThreadLocalRandom.RandomLongsSpliterator = { + val i = index + val m = (i + fence) >>> 1 + if (m <= index) null + else { + index = m + new ThreadLocalRandom.RandomLongsSpliterator(i, m, origin, bound) + } + } + + override def estimateSize(): Long = fence - index + override def characteristics(): Int = { + Spliterator.SIZED | + Spliterator.SUBSIZED | + Spliterator.NONNULL | + Spliterator.IMMUTABLE + } - var offset = 0L - var n = _n + override def tryAdvance(consumer: LongConsumer): Boolean = { + if (consumer == null) + throw new NullPointerException - while (n >= Integer.MAX_VALUE) { - val bits = next(2) - val halfn = n >>> 1 - val nextn = - if ((bits & 2) == 0) halfn - else n - halfn - if ((bits & 1) == 0) - offset += n - nextn - n = nextn + if (index < fence) { + consumer.accept( + ThreadLocalRandom.current().internalNextLong(origin, bound) + ) + index += 1 + return true + } + false + } + + override def forEachRemaining(consumer: LongConsumer): Unit = { + if (consumer == null) + throw new NullPointerException + + if (index < fence) { + val rng = ThreadLocalRandom.current() + + var i = index + index = fence + while ({ + consumer.accept(rng.internalNextLong(origin, bound)) + i += 1 + i < fence + }) () + } } - offset + nextInt(n.toInt) } - def nextLong(least: Long, bound: Long): Long = { - if (least >= bound) - throw new IllegalArgumentException() + final private class RandomDoublesSpliterator( + var index: Long, + fence: Long, + origin: Double, + bound: Double + ) extends Spliterator.OfDouble { - val difference = bound - least - if (difference > 0) { - nextLong(difference) + least - } else { - /* The interval size here is greater than Long.MaxValue, - * so the loop will exit with a probability of at least 1/2. - */ - @tailrec - def loop(): Long = { - val n = nextLong() - if (n >= least && n < bound) n - else loop() + override def trySplit(): ThreadLocalRandom.RandomDoublesSpliterator = { + val m = (index + fence) >>> 1 + if (m <= index) null + else { + val i = index + index = m + new ThreadLocalRandom.RandomDoublesSpliterator(i, m, origin, bound) } + } + override def estimateSize(): Long = fence - index + override def characteristics(): Int = { + Spliterator.SIZED | + Spliterator.SUBSIZED | + Spliterator.NONNULL | + Spliterator.IMMUTABLE + } + override def tryAdvance(consumer: DoubleConsumer): Boolean = { + if (consumer == null) + throw new NullPointerException - loop() + if (index < fence) { + consumer.accept( + ThreadLocalRandom.current().internalNextDouble()(origin, bound) + ) + index += 1 + return true + } + false + } + override def forEachRemaining(consumer: DoubleConsumer): Unit = { + if (consumer == null) + throw new NullPointerException + + if (index < fence) { + val rng = ThreadLocalRandom.current() + var i = index + index = fence + while ({ + consumer.accept(rng.internalNextDouble()(origin, bound)) + i += 1 + i < fence + }) () + } } } - def nextDouble(n: Double): Double = { - if (n <= 0) - throw new IllegalArgumentException("n must be positive") + private[concurrent] def getProbe(): Int = + Thread.currentThread().threadLocalRandomProbe - nextDouble() * n + private[concurrent] def advanceProbe(probe0: Int) = { + var probe = probe0 + probe ^= probe << 13 // xorshift + probe ^= probe >>> 17 + probe ^= probe << 5 + Thread.currentThread().threadLocalRandomProbe = probe + probe } - def nextDouble(least: Double, bound: Double): Double = { - if (least >= bound) - throw new IllegalArgumentException() + private[concurrent] def nextSecondarySeed(): Int = { + 
val t = Thread.currentThread() + var r: Int = t.threadLocalRandomSecondarySeed + if (r != 0) { + r ^= r << 13 + r ^= r >>> 17 + r ^= r << 5 + } else { + r = mix32(seeder.getAndAdd(SEEDER_INCREMENT)) + if (r == 0) r = 1 // avoid zero + } + // U.putInt(t, SECONDARY, r) + t.threadLocalRandomSecondarySeed = r + r + } - /* Based on documentation for Random.doubles to avoid issue #2144 and other - * possible rounding up issues: - * https://docs.oracle.com/javase/8/docs/api/java/util/Random.html#doubles-double-double- - */ - val next = nextDouble() * (bound - least) + least - if (next < bound) next - else Math.nextAfter(bound, Double.NegativeInfinity) + private[concurrent] def eraseThreadLocals(thread: Thread): Unit = { + // thread.threadLocals = null + // thread.inheritableThreadLocals = null } + + private val GAMMA = 0x9e3779b97f4a7c15L + + private val PROBE_INCREMENT = 0x9e3779b9 + private val SEEDER_INCREMENT = 0xbb67ae8584caa73bL + + private val DOUBLE_UNIT = 1.0 / (1L << 53) + private val FLOAT_UNIT = 1.0f / (1 << 24) + + // IllegalArgumentException messages + private[concurrent] val BAD_BOUND = "bound must be positive" + private[concurrent] val BAD_RANGE = "bound must be greater than origin" + private[concurrent] val BAD_SIZE = "size must be non-negative" + + private val nextLocalGaussian = new ThreadLocal[java.lang.Double] + + private val probeGenerator = new AtomicInteger + + private[concurrent] val instance = new ThreadLocalRandom + + private val seeder = new AtomicLong( + mix64(System.currentTimeMillis()) ^ mix64(System.nanoTime()) + ) } -object ThreadLocalRandom { +@SerialVersionUID(-5851777807851030925L) +class ThreadLocalRandom private () extends Random { + + private[concurrent] var initialized = true + + override def setSeed(seed: Long): Unit = { // only allow call from super() constructor + if (initialized) + throw new UnsupportedOperationException + } + final private[concurrent] def nextSeed(): Long = { + val t = Thread.currentThread() + t.threadLocalRandomSeed += ThreadLocalRandom.GAMMA // read and update per-thread seed + t.threadLocalRandomSeed + } + + override protected def next(bits: Int): Int = nextInt() >>> (32 - bits) + + final private[concurrent] def internalNextLong(origin: Long, bound: Long) = { + var r = ThreadLocalRandom.mix64(nextSeed()) + if (origin < bound) { + val n = bound - origin + val m = n - 1 + if ((n & m) == 0L) { // power of two + r = (r & m) + origin + } else if (n > 0L) { // reject over-represented candidates + var u = r >>> 1 // ensure nonnegative + r = u % n + while ((u + m - r) < 0L) { // rejection check + // retry + u = ThreadLocalRandom.mix64(nextSeed()) >>> 1 + } + r += origin + } else { // range not representable as long + while ({ r < origin || r >= bound }) { + r = ThreadLocalRandom.mix64(nextSeed()) + } + } + } + r + } + + final private[concurrent] def internalNextInt(origin: Int, bound: Int) = { + var r = ThreadLocalRandom.mix32(nextSeed()) + if (origin < bound) { + val n = bound - origin + val m = n - 1 + if ((n & m) == 0) r = (r & m) + origin + else if (n > 0) { + var u = r >>> 1 + r = u % n + while ((u + m - r) < 0) + u = ThreadLocalRandom.mix32(nextSeed()) >>> 1 + r += origin + } else + while ({ r < origin || r >= bound }) { + r = ThreadLocalRandom.mix32(nextSeed()) + } + } + r + } + + final private[concurrent] def internalNextDouble()( + origin: Double, + bound: Double + ) = { + var r = (nextLong() >>> 11) * ThreadLocalRandom.DOUBLE_UNIT + if (origin < bound) { + r = r * (bound - origin) + origin + if (r >= bound) { // correct for 
rounding + r = java.lang.Double.longBitsToDouble( + java.lang.Double.doubleToLongBits(bound) - 1 + ) + } + } + r + } + + override def nextInt(): Int = ThreadLocalRandom.mix32(nextSeed()) + + override def nextInt(bound: Int): Int = { + if (bound <= 0) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_BOUND) + var r = ThreadLocalRandom.mix32(nextSeed()) + val m = bound - 1 + if ((bound & m) == 0) // power of two + r &= m + else { // reject over-represented candidates + var u = r >>> 1 + while ({ + r = u % bound + (u + m - r) < 0 + }) { + u = ThreadLocalRandom.mix32(nextSeed()) >>> 1 + } + } + assert(r < bound, s"r:$r < bound: $bound") + r + } + + def nextInt(origin: Int, bound: Int): Int = { + if (origin >= bound) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + internalNextInt(origin, bound) + } + + override def nextLong(): Long = ThreadLocalRandom.mix64(nextSeed()) + + def nextLong(bound: Long): Long = { + if (bound <= 0) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_BOUND) + var r = ThreadLocalRandom.mix64(nextSeed()) + val m = bound - 1 + if ((bound & m) == 0L) r &= m + else { + var u: Long = r >>> 1 + r = u % bound + while ({ + r = u % bound + (u + m - r) < 0L + }) + u = ThreadLocalRandom.mix64(nextSeed()) >>> 1 + } + r + } + + def nextLong(origin: Long, bound: Long): Long = { + if (origin >= bound) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + internalNextLong(origin, bound) + } + + override def nextDouble(): Double = + (ThreadLocalRandom.mix64(nextSeed()) >>> 11) * ThreadLocalRandom.DOUBLE_UNIT + + def nextDouble(bound: Double): Double = { + if (!(bound > 0.0)) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_BOUND) + val result = + (ThreadLocalRandom.mix64( + nextSeed() + ) >>> 11) * ThreadLocalRandom.DOUBLE_UNIT * bound + if (result < bound) result + else + java.lang.Double + .longBitsToDouble(java.lang.Double.doubleToLongBits(bound) - 1) + } + + def nextDouble(origin: Double, bound: Double): Double = { + if (!(origin < bound)) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + internalNextDouble()(origin, bound) + } - private val _current = - new ThreadLocalRandom() + override def nextBoolean(): Boolean = ThreadLocalRandom.mix32(nextSeed()) < 0 - def current(): ThreadLocalRandom = _current + override def nextFloat(): Float = + (ThreadLocalRandom.mix32(nextSeed()) >>> 8) * ThreadLocalRandom.FLOAT_UNIT + override def nextGaussian() + : Double = { // Use nextLocalGaussian instead of nextGaussian field + val d = + ThreadLocalRandom.nextLocalGaussian.get().asInstanceOf[java.lang.Double] + if (d != null) { + ThreadLocalRandom.nextLocalGaussian.set(null.asInstanceOf[Double]) + return d.doubleValue() + } + var v1 = .0 + var v2 = .0 + var s = .0 + while ({ + v1 = 2 * nextDouble() - 1 // between -1 and 1 + + v2 = 2 * nextDouble() - 1 + s = v1 * v1 + v2 * v2 + s >= 1 || s == 0 + }) () + + val multiplier = Math.sqrt(-2 * Math.log(s) / s) + ThreadLocalRandom.nextLocalGaussian.set( + java.lang.Double.valueOf(v2 * multiplier).doubleValue() + ) + v1 * multiplier + } + + def ints(streamSize: Long): IntStream = { + if (streamSize < 0L) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_SIZE) + StreamSupport.intStream( + new ThreadLocalRandom.RandomIntsSpliterator( + 0L, + streamSize, + Integer.MAX_VALUE, + 0 + ), + false + ) + } + + def ints(): IntStream = + StreamSupport.intStream( + new ThreadLocalRandom.RandomIntsSpliterator( + 0L, + java.lang.Long.MAX_VALUE, + Integer.MAX_VALUE, + 0 + ), + false + ) 
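
The stream factories are backed by the spliterators above; sizes and ranges are validated with the shared `BAD_SIZE` / `BAD_RANGE` messages. A short sketch:

```scala
import java.util.concurrent.ThreadLocalRandom

object BoundedIntStreamSketch {
  def main(args: Array[String]): Unit = {
    val rng = ThreadLocalRandom.current()

    // Five values in [10, 20), produced by RandomIntsSpliterator.
    rng.ints(5L, 10, 20).forEach(i => println(i))

    // Argument checks mirror the guards above.
    try rng.ints(-1L)
    catch { case e: IllegalArgumentException => println(e.getMessage) } // size must be non-negative

    try rng.ints(5L, 20, 10)
    catch { case e: IllegalArgumentException => println(e.getMessage) } // bound must be greater than origin
  }
}
```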
+ + def ints( + streamSize: Long, + randomNumberOrigin: Int, + randomNumberBound: Int + ): IntStream = { + if (streamSize < 0L) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_SIZE) + if (randomNumberOrigin >= randomNumberBound) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + StreamSupport.intStream( + new ThreadLocalRandom.RandomIntsSpliterator( + 0L, + streamSize, + randomNumberOrigin, + randomNumberBound + ), + false + ) + } + + def ints(randomNumberOrigin: Int, randomNumberBound: Int): IntStream = { + if (randomNumberOrigin >= randomNumberBound) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + StreamSupport.intStream( + new ThreadLocalRandom.RandomIntsSpliterator( + 0L, + java.lang.Long.MAX_VALUE, + randomNumberOrigin, + randomNumberBound + ), + false + ) + } + + def longs(streamSize: Long): LongStream = { + if (streamSize < 0L) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_SIZE) + StreamSupport.longStream( + new ThreadLocalRandom.RandomLongsSpliterator( + 0L, + streamSize, + java.lang.Long.MAX_VALUE, + 0L + ), + false + ) + } + + def longs(): LongStream = + StreamSupport.longStream( + new ThreadLocalRandom.RandomLongsSpliterator( + 0L, + java.lang.Long.MAX_VALUE, + java.lang.Long.MAX_VALUE, + 0L + ), + false + ) + + def longs( + streamSize: Long, + randomNumberOrigin: Long, + randomNumberBound: Long + ): LongStream = { + if (streamSize < 0L) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_SIZE) + if (randomNumberOrigin >= randomNumberBound) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + StreamSupport.longStream( + new ThreadLocalRandom.RandomLongsSpliterator( + 0L, + streamSize, + randomNumberOrigin, + randomNumberBound + ), + false + ) + } + + def longs(randomNumberOrigin: Long, randomNumberBound: Long): LongStream = { + if (randomNumberOrigin >= randomNumberBound) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + StreamSupport.longStream( + new ThreadLocalRandom.RandomLongsSpliterator( + 0L, + java.lang.Long.MAX_VALUE, + randomNumberOrigin, + randomNumberBound + ), + false + ) + } + + override def doubles(streamSize: Long): DoubleStream = { + if (streamSize < 0L) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_SIZE) + StreamSupport.doubleStream( + new ThreadLocalRandom.RandomDoublesSpliterator( + 0L, + streamSize, + java.lang.Double.MAX_VALUE, + 0.0 + ), + false + ) + } + + override def doubles(): DoubleStream = + StreamSupport.doubleStream( + new ThreadLocalRandom.RandomDoublesSpliterator( + 0L, + java.lang.Long.MAX_VALUE, + java.lang.Double.MAX_VALUE, + 0.0 + ), + false + ) + + override def doubles( + streamSize: Long, + randomNumberOrigin: Double, + randomNumberBound: Double + ): DoubleStream = { + if (streamSize < 0L) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_SIZE) + + if (!(randomNumberOrigin < randomNumberBound)) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + + StreamSupport.doubleStream( + new ThreadLocalRandom.RandomDoublesSpliterator( + 0L, + streamSize, + randomNumberOrigin, + randomNumberBound + ), + false + ) + } + + override def doubles( + randomNumberOrigin: Double, + randomNumberBound: Double + ): DoubleStream = { + if (!(randomNumberOrigin < randomNumberBound)) + throw new IllegalArgumentException(ThreadLocalRandom.BAD_RANGE) + StreamSupport.doubleStream( + new ThreadLocalRandom.RandomDoublesSpliterator( + 0L, + java.lang.Long.MAX_VALUE, + randomNumberOrigin, + randomNumberBound + ), + false + ) + } } diff 
--git a/javalib/src/main/scala/java/util/regex/Matcher.scala b/javalib/src/main/scala/java/util/regex/Matcher.scala index 306868890c..7a338d4919 100644 --- a/javalib/src/main/scala/java/util/regex/Matcher.scala +++ b/javalib/src/main/scala/java/util/regex/Matcher.scala @@ -96,7 +96,7 @@ final class Matcher private[regex] ( } def reset(input: CharSequence): Matcher = { - reset() + underlying.reset(input) _inputSequence = input this } diff --git a/javalib/src/main/scala/java/util/stream/Collectors.scala b/javalib/src/main/scala/java/util/stream/Collectors.scala index 0c941ce77f..481ddd944c 100644 --- a/javalib/src/main/scala/java/util/stream/Collectors.scala +++ b/javalib/src/main/scala/java/util/stream/Collectors.scala @@ -579,59 +579,33 @@ object Collectors { prefix: CharSequence, suffix: CharSequence ): Collector[CharSequence, AnyRef, String] = { - val delimiterLength = delimiter.length() - - val supplier = new Supplier[StringBuilder] { - def get(): StringBuilder = { - val sb = new StringBuilder() - if (prefix != "") - sb.append(prefix) - sb + + val supplier = new Supplier[StringJoiner] { + def get(): StringJoiner = { + new StringJoiner(delimiter, prefix, suffix) } } - val accumulator = new BiConsumer[StringBuilder, CharSequence] { - def accept(accum: StringBuilder, element: CharSequence): Unit = { - val acc = accum.append(element) - if (delimiter != "") - accum.append(delimiter) + val accumulator = new BiConsumer[StringJoiner, CharSequence] { + def accept(accum: StringJoiner, element: CharSequence): Unit = { + accum.add(element) } } - val combiner = new BinaryOperator[StringBuilder] { - def apply( - sb1: StringBuilder, - sb2: StringBuilder - ): StringBuilder = { - sb1.append(sb2) + val combiner = new BinaryOperator[StringJoiner] { + def apply(sj1: StringJoiner, sj2: StringJoiner): StringJoiner = { + sj1.merge(sj2) } } - val finisher = - new Function[StringBuilder, String] { - def apply(accum: StringBuilder): String = { - - if ((accum.length() > prefix.length()) && (delimiterLength > 0)) { - /* This branch means accum has contents beyond a possible prefix. - * If a delimiter arg was is specified, accumlator() will have - * appended that delimiter. A delimiter is unwanted after what is - * now known to be the last item, so trim it off before possibly - * adding a suffix. - */ - val lastIndex = accum.length() - delimiterLength - accum.setLength(lastIndex) // trim off last delimiter sequence. - } - // Else empty stream; no token accepted, hence no delimiter to trim. - - if (suffix != "") - accum.append(suffix) - - accum.toString() - } + val finisher = new Function[StringJoiner, String] { + def apply(accum: StringJoiner): String = { + accum.toString() } + } Collector - .of[CharSequence, StringBuilder, String]( + .of[CharSequence, StringJoiner, String]( supplier, accumulator, combiner, diff --git a/javalib/src/main/scala/java/util/stream/DoubleStream.scala b/javalib/src/main/scala/java/util/stream/DoubleStream.scala index f473f6a0ca..4256822912 100644 --- a/javalib/src/main/scala/java/util/stream/DoubleStream.scala +++ b/javalib/src/main/scala/java/util/stream/DoubleStream.scala @@ -1,9 +1,25 @@ package java.util.stream +import java.{lang => jl} + import java.util._ import java.util.function._ -trait DoubleStream extends BaseStream[Double, DoubleStream] { +/* Design Note: + * + * DoubleStream extends BaseStream[jl.Double, DoubleStream] + * in correspondence to the documentation & usage of Spliterator.Of* + * and PrimitiveIterator.Of*. That is, the first type is a Java container. 
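
Delegating `Collectors.joining` to `StringJoiner` above removes the manual trailing-delimiter trimming and makes the empty-stream case fall out naturally (just prefix plus suffix). A usage sketch, assuming the usual JDK-style `Stream.of` factory:

```scala
import java.util.stream.{Collectors, Stream}

object JoiningSketch {
  def main(args: Array[String]): Unit = {
    val joined = Stream
      .of("a", "b", "c")
      .collect(Collectors.joining(", ", "[", "]"))
    println(joined) // [a, b, c]

    // An empty stream yields only prefix + suffix, with no stray delimiter.
    val empty = Stream
      .of("a", "b", "c")
      .filter(s => s.length() > 3) // nothing survives the filter
      .collect(Collectors.joining(", ", "[", "]"))
    println(empty) // []
  }
}
```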
+ * + * In this file "Double" types should be qualified to ease tracing the code + * and prevent confusion & defects. + * * jl.Double indicates an Java Object qua Scala AnyRef is desired. + * * scala.Double indicates a Java "double" primitive is desired. + * Someday, the generated code should be examined to ensure that + * unboxed primitives are actually being used. + */ + +trait DoubleStream extends BaseStream[jl.Double, DoubleStream] { def allMatch(pred: DoublePredicate): Boolean @@ -11,7 +27,7 @@ trait DoubleStream extends BaseStream[Double, DoubleStream] { def average(): OptionalDouble - def boxed(): Stream[Double] + def boxed(): Stream[jl.Double] def collect[R]( supplier: Supplier[R], @@ -46,12 +62,12 @@ trait DoubleStream extends BaseStream[Double, DoubleStream] { def tryAdvance(action: DoubleConsumer): Boolean = { if (doneDropping) { - spliter.tryAdvance((e) => action.accept(e)) + spliter.tryAdvance(e => action.accept(e)) } else { var doneLooping = false while (!doneLooping) { val advanced = - spliter.tryAdvance((e) => { + spliter.tryAdvance(e => { if (!pred.test(e)) { action.accept(e) doneDropping = true @@ -108,7 +124,7 @@ trait DoubleStream extends BaseStream[Double, DoubleStream] { val spliter = this.spliterator() // also marks this stream "operated upon" - val buffer = new ArrayDeque[Double]() + val buffer = new ArrayDeque[scala.Double]() // Can not predict replacements, so Spliterator can not be SIZED. // May need to adjust other characteristics. @@ -125,9 +141,7 @@ trait DoubleStream extends BaseStream[Double, DoubleStream] { while (!done) { if (buffer.size() == 0) { val stepped = - spliter.tryAdvance((e: Double) => - mapper.accept(e, r => buffer.add(r)) - ) + spliter.tryAdvance(e => mapper.accept(e, r => buffer.add(r))) done = !stepped } else { action.accept(buffer.removeFirst()) @@ -161,7 +175,7 @@ trait DoubleStream extends BaseStream[Double, DoubleStream] { def peek(action: DoubleConsumer): DoubleStream - def reduce(identity: Double, op: DoubleBinaryOperator): Double + def reduce(identity: scala.Double, op: DoubleBinaryOperator): Double def reduce(op: DoubleBinaryOperator): OptionalDouble @@ -169,7 +183,7 @@ trait DoubleStream extends BaseStream[Double, DoubleStream] { def sorted(): DoubleStream - def sum(): Double + def sum(): scala.Double def summaryStatistics(): DoubleSummaryStatistics @@ -196,7 +210,7 @@ trait DoubleStream extends BaseStream[Double, DoubleStream] { def tryAdvance(action: DoubleConsumer): Boolean = { if (done) false else - spliter.tryAdvance((e) => + spliter.tryAdvance(e => if (!pred.test(e)) done = true else action.accept(e) ) @@ -206,7 +220,7 @@ trait DoubleStream extends BaseStream[Double, DoubleStream] { new DoubleStreamImpl(spl, parallel = false, parent = this) } - def toArray(): Array[Double] + def toArray(): Array[scala.Double] } @@ -223,7 +237,7 @@ object DoubleStream { @FunctionalInterface trait DoubleMapMultiConsumer { - def accept(value: Double, dc: DoubleConsumer): Unit + def accept(value: scala.Double, dc: DoubleConsumer): Unit } def builder(): DoubleStream.Builder = @@ -252,16 +266,19 @@ object DoubleStream { // Since: Java 9 def iterate( - seed: Double, + seed: scala.Double, hasNext: DoublePredicate, next: DoubleUnaryOperator ): DoubleStream = { - // "seed" on RHS here is to keep compiler happy with local var init + // "seed" on RHS here is to keep compiler happy with local var initialize. 
var previous = seed var seedUsed = false val spliter = - new Spliterators.AbstractDoubleSpliterator(Long.MaxValue, 0) { + new Spliterators.AbstractDoubleSpliterator( + Long.MaxValue, + Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL + ) { def tryAdvance(action: DoubleConsumer): Boolean = { val current = if (seedUsed) next.applyAsDouble(previous) @@ -283,14 +300,18 @@ object DoubleStream { } def iterate( - seed: Double, + seed: scala.Double, f: DoubleUnaryOperator ): DoubleStream = { - var previous = seed // "seed" here is just to keep compiler happy. + // "seed" on RHS here is to keep compiler happy with local var initialization. + var previous = seed var seedUsed = false val spliter = - new Spliterators.AbstractDoubleSpliterator(Long.MaxValue, 0) { + new Spliterators.AbstractDoubleSpliterator( + Long.MaxValue, + Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL + ) { def tryAdvance(action: DoubleConsumer): Boolean = { val current = if (seedUsed) f.applyAsDouble(previous) @@ -308,21 +329,20 @@ object DoubleStream { new DoubleStreamImpl(spliter, parallel = false) } - def of(values: Array[Double]): DoubleStream = { + def of(values: Array[scala.Double]): DoubleStream = { /* One would expect variables arguments to be declared as * "values: Objects*" here. * However, that causes "symbol not found" errors at OS link time. * An implicit conversion must be missing in the javalib environment. */ - val bldr = DoubleStream.builder() - for (j <- values) - bldr.add(j) - - bldr.build() + Arrays.stream(values) } - def of(t: Double): DoubleStream = - DoubleStream.builder().add(t).build() + def of(t: scala.Double): DoubleStream = { + val values = new Array[Double](1) + values(0) = t + DoubleStream.of(values) + } } diff --git a/javalib/src/main/scala/java/util/stream/DoubleStreamImpl.scala b/javalib/src/main/scala/java/util/stream/DoubleStreamImpl.scala index cccb258083..5b6eeb790c 100644 --- a/javalib/src/main/scala/java/util/stream/DoubleStreamImpl.scala +++ b/javalib/src/main/scala/java/util/stream/DoubleStreamImpl.scala @@ -5,6 +5,10 @@ import java.{util => ju} import java.util._ import java.util.function._ +/* See "Design Note" at top of DoubleStream.scala for jl.Double & scala.Double + * TL;DR - the latter is explicitly used where a primitive is desired. + */ + private[stream] class DoubleStreamImpl( val pipeline: ArrayDeque[DoubleStreamImpl] ) extends DoubleStream { @@ -76,7 +80,7 @@ private[stream] class DoubleStreamImpl( protected def commenceOperation(): Unit = { if (_operatedUpon || _closed) - ObjectStreamImpl.throwIllegalStateException() + StreamImpl.throwIllegalStateException() _operatedUpon = true } @@ -128,7 +132,7 @@ private[stream] class DoubleStreamImpl( // JVM appears to not set "operated upon" here. if (_closed) - ObjectStreamImpl.throwIllegalStateException() + StreamImpl.throwIllegalStateException() // detects & throws on closeHandler == null onCloseQueue.addLast(closeHandler) @@ -144,31 +148,34 @@ private[stream] class DoubleStreamImpl( def sequential(): DoubleStreamImpl = this - def spliterator(): ju.Spliterator[_ <: Double] = { + def spliterator(): Spliterator.OfDouble = { commenceOperation() - _spliter.asInstanceOf[ju.Spliterator[_ <: Double]] + _spliter } def unordered(): DoubleStream = { - /* JVM has an unenforced requirment that a stream and its spliterator - * (can you say Harlan Ellison?) should have the same characteristics.
- */ - val masked = _spliter.characteristics() & Spliterator.ORDERED - if (masked == Spliterator.ORDERED) this + if (masked != Spliterator.ORDERED) this // already unordered. else { commenceOperation() - // Clear ORDERED - val unordered = _spliter.characteristics() & ~(Spliterator.ORDERED) + val bitsToClear = + (Spliterator.CONCURRENT + | Spliterator.IMMUTABLE + | Spliterator.NONNULL + | Spliterator.ORDERED + | Spliterator.SIZED + | Spliterator.SUBSIZED) + + val purifiedBits = _characteristics & ~(bitsToClear) val spl = new Spliterators.AbstractDoubleSpliterator( _spliter.estimateSize(), - unordered + purifiedBits ) { def tryAdvance(action: DoubleConsumer): Boolean = - _spliter.tryAdvance((e: Double) => action.accept(e)) + _spliter.tryAdvance((e: scala.Double) => action.accept(e)) } new DoubleStreamImpl(spl, _parallel, pipeline) @@ -184,7 +191,7 @@ private[stream] class DoubleStreamImpl( var mismatchFound = false while (!mismatchFound && - _spliter.tryAdvance((e: Double) => + _spliter.tryAdvance((e: scala.Double) => if (!pred.test(e)) mismatchFound = true )) { /* search */ } @@ -197,7 +204,7 @@ private[stream] class DoubleStreamImpl( var matchFound = false while (!matchFound && - _spliter.tryAdvance((e: Double) => + _spliter.tryAdvance((e: scala.Double) => if (pred.test(e)) matchFound = true )) { /* search */ } @@ -210,13 +217,13 @@ private[stream] class DoubleStreamImpl( var count = 0 var sum = 0.0 - _spliter.forEachRemaining((d: Double) => { count += 1; sum += d }) + _spliter.forEachRemaining((d: scala.Double) => { count += 1; sum += d }) if (count == 0) OptionalDouble.empty() else OptionalDouble.of(sum / count) } - def boxed(): Stream[Double] = - this.mapToObj[Double](d => d) + def boxed(): Stream[jl.Double] = + this.mapToObj[jl.Double](d => scala.Double.box(d)) def collect[R]( supplier: Supplier[R], @@ -227,7 +234,9 @@ private[stream] class DoubleStreamImpl( val result = supplier.get() - _spliter.forEachRemaining((e: Double) => accumulator.accept(result, e)) + _spliter.forEachRemaining((e: scala.Double) => + accumulator.accept(result, e) + ) result } @@ -236,14 +245,14 @@ private[stream] class DoubleStreamImpl( commenceOperation() var count = 0L - _spliter.forEachRemaining((d: Double) => count += 1) + _spliter.forEachRemaining((d: scala.Double) => count += 1) count } def distinct(): DoubleStream = { commenceOperation() - val seenElements = new ju.HashSet[Double]() + val seenElements = new ju.HashSet[scala.Double]() // Some items may be dropped, so the estimated size is a high bound. 
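The distinct() implementation above hinges on ju.HashSet#add returning false for an element that is already present, so repeats are never handed to the downstream action. A minimal standalone sketch of that pattern, using a hypothetical helper name rather than the javalib code itself:

```scala
import java.util.{HashSet => JHashSet}

object DistinctSketch {
  // Hypothetical helper illustrating the "seen set" technique:
  // HashSet#add reports whether the element was newly added, so
  // duplicates are simply filtered out.
  def dedupDoubles(values: Array[scala.Double]): Array[scala.Double] = {
    val seen = new JHashSet[scala.Double]()
    values.filter(v => seen.add(v))
  }
  // dedupDoubles(Array(1.0, 2.0, 1.0, 3.0)).toSeq == Seq(1.0, 2.0, 3.0)
}
```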
val estimatedSize = _spliter.estimateSize() @@ -258,7 +267,7 @@ private[stream] class DoubleStreamImpl( var done = false while (!done) { var advanced = - _spliter.tryAdvance((e: Double) => { + _spliter.tryAdvance((e: scala.Double) => { val added = seenElements.add(e) if (added) { @@ -292,7 +301,7 @@ private[stream] class DoubleStreamImpl( var done = false while (!done) { var advanced = - _spliter.tryAdvance((e: Double) => { + _spliter.tryAdvance((e: scala.Double) => { if (pred.test(e)) { action.accept(e) done = true @@ -322,7 +331,9 @@ private[stream] class DoubleStreamImpl( def findFirst(): OptionalDouble = { commenceOperation() var optional = OptionalDouble.empty() - _spliter.tryAdvance((e: Double) => { optional = OptionalDouble.of(e) }) + _spliter.tryAdvance((e: scala.Double) => { + optional = OptionalDouble.of(e) + }) optional } @@ -355,6 +366,14 @@ private[stream] class DoubleStreamImpl( } def limit(maxSize: Long): DoubleStream = { + + /* Important: + * See Issue #3309 & StreamImpl#limit for discussion of size + * & characteristics in JVM 17 (and possibly as early as JVM 12) + * for parallel ORDERED streams. + * The behavior implemented here is Java 8 and at least Java 11. + */ + if (maxSize < 0) throw new IllegalArgumentException(maxSize.toString()) @@ -362,15 +381,23 @@ private[stream] class DoubleStreamImpl( var nSeen = 0L + val startingBits = _spliter.characteristics() + + val alwaysClearedBits = + Spliterator.SIZED | Spliterator.SUBSIZED | + Spliterator.NONNULL | Spliterator.IMMUTABLE | Spliterator.CONCURRENT + + val newStreamCharacteristics = startingBits & ~alwaysClearedBits + val spl = new Spliterators.AbstractDoubleSpliterator( - maxSize, - _spliter.characteristics() + Long.MaxValue, + newStreamCharacteristics ) { def tryAdvance(action: DoubleConsumer): Boolean = if (nSeen >= maxSize) false else { var advanced = - _spliter.tryAdvance((e: Double) => action.accept(e)) + _spliter.tryAdvance((e: scala.Double) => action.accept(e)) nSeen = if (advanced) nSeen + 1 else Long.MaxValue @@ -392,7 +419,7 @@ private[stream] class DoubleStreamImpl( _spliter.characteristics() ) { def tryAdvance(action: DoubleConsumer): Boolean = - _spliter.tryAdvance((e: Double) => + _spliter.tryAdvance((e: scala.Double) => action.accept(mapper.applyAsDouble(e)) ) } @@ -413,25 +440,25 @@ private[stream] class DoubleStreamImpl( _spliter.characteristics() ) { def tryAdvance(action: Consumer[_ >: U]): Boolean = - _spliter.tryAdvance((e: Double) => action.accept(mapper(e))) + _spliter.tryAdvance((e: scala.Double) => action.accept(mapper(e))) } - new ObjectStreamImpl[U]( + new StreamImpl[U]( spl, _parallel, pipeline - .asInstanceOf[ArrayDeque[ObjectStreamImpl[U]]] + .asInstanceOf[ArrayDeque[StreamImpl[U]]] ) } def max(): OptionalDouble = { commenceOperation() - var max: Double = jl.Double.NEGATIVE_INFINITY + var max: scala.Double = jl.Double.NEGATIVE_INFINITY var exitEarly = false // leave loop after first NaN encountered, if any. 
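The exitEarly flag above encodes the usual floating-point reduction rule: once a NaN is encountered, max() (and min()) yields NaN, as on the JVM. A small usage sketch, assuming the standard DoubleStream API and a hypothetical object name:

```scala
import java.util.Arrays

object NaNMaxSketch {
  def main(args: Array[String]): Unit = {
    // Without NaN, max() behaves as expected.
    val plain = Arrays.stream(Array(1.0, 2.0, 3.0)).max()
    println(plain.getAsDouble()) // 3.0

    // A single NaN poisons the reduction, which is why the loop above
    // can stop as soon as one is seen.
    val poisoned = Arrays.stream(Array(1.0, Double.NaN, 3.0)).max()
    println(poisoned.getAsDouble().isNaN) // true
  }
}
```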
- def body(d: Double): Unit = { + def body(d: scala.Double): Unit = { if (d.isNaN()) { max = d exitEarly = true @@ -440,12 +467,12 @@ private[stream] class DoubleStreamImpl( } } - val advanced = _spliter.tryAdvance((d: Double) => body(d)) + val advanced = _spliter.tryAdvance((d: scala.Double) => body(d)) if (!advanced) OptionalDouble.empty() else { while (!exitEarly && - _spliter.tryAdvance((d: Double) => body(d))) { /* search */ } + _spliter.tryAdvance((d: scala.Double) => body(d))) { /* search */ } OptionalDouble.of(max) } } @@ -453,11 +480,11 @@ private[stream] class DoubleStreamImpl( def min(): OptionalDouble = { commenceOperation() - var min: Double = jl.Double.POSITIVE_INFINITY + var min: scala.Double = jl.Double.POSITIVE_INFINITY var exitEarly = false // leave loop after first NaN encountered, if any. - def body(d: Double): Unit = { + def body(d: scala.Double): Unit = { if (d.isNaN()) { min = d exitEarly = true @@ -465,12 +492,12 @@ private[stream] class DoubleStreamImpl( min = d } } - val advanced = _spliter.tryAdvance((d: Double) => body(d)) + val advanced = _spliter.tryAdvance((d: scala.Double) => body(d)) if (!advanced) OptionalDouble.empty() else { while (!exitEarly && - _spliter.tryAdvance((d: Double) => body(d))) { /* search */ } + _spliter.tryAdvance((d: scala.Double) => body(d))) { /* search */ } OptionalDouble.of(min) } } @@ -491,7 +518,7 @@ private[stream] class DoubleStreamImpl( ) { def tryAdvance(action: DoubleConsumer): Boolean = - _spliter.tryAdvance((e: Double) => { + _spliter.tryAdvance((e: scala.Double) => { peekAction.accept(e) action.accept(e) }) @@ -505,10 +532,10 @@ private[stream] class DoubleStreamImpl( var reduceOpt = OptionalDouble.empty() - _spliter.tryAdvance((e: Double) => reduceOpt = OptionalDouble.of(e)) + _spliter.tryAdvance((e: scala.Double) => reduceOpt = OptionalDouble.of(e)) reduceOpt.ifPresent((first) => { var previous = first - _spliter.forEachRemaining((e: Double) => + _spliter.forEachRemaining((e: scala.Double) => previous = accumulator.applyAsDouble(previous, e) ) reduceOpt = OptionalDouble.of(previous) @@ -517,12 +544,15 @@ private[stream] class DoubleStreamImpl( reduceOpt } - def reduce(identity: Double, accumulator: DoubleBinaryOperator): Double = { + def reduce( + identity: scala.Double, + accumulator: DoubleBinaryOperator + ): scala.Double = { commenceOperation() var accumulated = identity - _spliter.forEachRemaining((e: Double) => + _spliter.forEachRemaining((e: scala.Double) => accumulated = accumulator.applyAsDouble(accumulated, e) ) accumulated @@ -537,45 +567,65 @@ private[stream] class DoubleStreamImpl( var nSkipped = 0L while ((nSkipped < n) - && (_spliter.tryAdvance((e: Double) => nSkipped += 1L))) { /* skip */ } + && (_spliter + .tryAdvance((e: scala.Double) => nSkipped += 1L))) { /* skip */ } // Follow JVM practice; return new stream, not remainder of "this" stream. new DoubleStreamImpl(_spliter, _parallel, pipeline) } def sorted(): DoubleStream = { - commenceOperation() + // No commenceOperation() here. This is an intermediate operation. - /* Be aware that this method will/should throw on first use if type - * T is not Comparable[T]. This is described in the Java Stream doc. - * - * Implementation note: - * It would seem that Comparator.naturalOrder() - * could be used here. The SN complier complains, rightly, that - * T is not known to be [T <: Comparable[T]]. That is because - * T may actually not _be_ comparable. The comparator below punts - * the issue and raises an exception if T is, indeed, not comparable. 
- */ + class SortingSpliterOfDoubleSupplier( + srcSpliter: Spliterator.OfDouble + ) extends Supplier[Spliterator.OfDouble] { + + def get(): Spliterator.OfDouble = { + val knownSize = _spliter.getExactSizeIfKnown() + + if (knownSize > Integer.MAX_VALUE) { + throw new IllegalArgumentException( + "Stream size exceeds max array size" + ) + } else { + /* Sufficiently large streams, with either known or unknown size may + * eventually throw an OutOfMemoryError exception, same as JVM. + * + * sorting streams of unknown size is likely to be _slow_. + */ + + val buffer = toArray() - val buffer = new ArrayList[Double]() - _spliter.forEachRemaining((e: Double) => { buffer.add(e); () }) + Arrays.sort(buffer) - // See if there is a more efficient way of doing this. - val nElements = buffer.size() - val primitiveDoubles = new Array[Double](nElements) - for (j <- 0 until nElements) - primitiveDoubles(j) = buffer.get(j) + val startingBits = _spliter.characteristics() + val alwaysSetBits = + Spliterator.SORTED | Spliterator.ORDERED | + Spliterator.SIZED | Spliterator.SUBSIZED + + // Time & experience may show that additional bits need to be cleared + val alwaysClearedBits = Spliterator.IMMUTABLE + + val newCharacteristics = + (startingBits | alwaysSetBits) & ~alwaysClearedBits + + Spliterators.spliterator(buffer, newCharacteristics) + } + } + } - Arrays.sort(primitiveDoubles) - Arrays.stream(primitiveDoubles) + // Do the sort in the eventual terminal operation, not now. + val spl = new SortingSpliterOfDoubleSupplier(_spliter) + new DoubleStreamImpl(spl, 0, _parallel) } - def sum(): Double = { + def sum(): scala.Double = { commenceOperation() var sum = 0.0 - _spliter.forEachRemaining((d: Double) => sum += d) + _spliter.forEachRemaining((d: scala.Double) => sum += d) sum } @@ -584,32 +634,32 @@ private[stream] class DoubleStreamImpl( val stats = new DoubleSummaryStatistics() - _spliter.forEachRemaining((d: Double) => stats.accept(d)) + _spliter.forEachRemaining((d: scala.Double) => stats.accept(d)) stats } - def toArray(): Array[Double] = { + def toArray(): Array[scala.Double] = { commenceOperation() val knownSize = _spliter.getExactSizeIfKnown() if (knownSize < 0) { - val buffer = new ArrayList[Double]() - _spliter.forEachRemaining((e: Double) => { buffer.add(e); () }) + val buffer = new ArrayList[scala.Double]() + _spliter.forEachRemaining((e: scala.Double) => { buffer.add(e); () }) // See if there is a more efficient way of doing this. val nElements = buffer.size() - val primitiveDoubles = new Array[Double](nElements) + val primitiveDoubles = new Array[scala.Double](nElements) for (j <- 0 until nElements) primitiveDoubles(j) = buffer.get(j) primitiveDoubles } else { - val primitiveDoubles = new Array[Double](knownSize.toInt) + val primitiveDoubles = new Array[scala.Double](knownSize.toInt) var j = 0 - _spliter.forEachRemaining((e: Double) => { + _spliter.forEachRemaining((e: scala.Double) => { primitiveDoubles(j) = e j += 1 }) @@ -622,18 +672,18 @@ private[stream] class DoubleStreamImpl( object DoubleStreamImpl { class Builder extends DoubleStream.Builder { - private val buffer = new ArrayList[Double]() + private val buffer = new ArrayList[scala.Double]() private var built = false - override def accept(t: Double): Unit = - if (built) ObjectStreamImpl.throwIllegalStateException() + override def accept(t: scala.Double): Unit = + if (built) StreamImpl.throwIllegalStateException() else buffer.add(t) override def build(): DoubleStream = { built = true // See if there is a more efficient way of doing this. 
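For context, the Builder whose accept() and build() methods appear above is normally driven as in the following hedged usage sketch; it assumes the standard DoubleStream.Builder API and uses a hypothetical object name:

```scala
import java.util.stream.DoubleStream

object BuilderSketch {
  def main(args: Array[String]): Unit = {
    val builder = DoubleStream.builder()
    builder.add(3.0)
    builder.add(1.0)
    builder.add(2.0)

    val stream: DoubleStream = builder.build()
    println(stream.count()) // 3

    // After build(), the builder is spent: further accept()/add() calls
    // throw IllegalStateException, matching the `built` flag checked above.
  }
}
```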
val nElements = buffer.size() - val primitiveDoubles = new Array[Double](nElements) + val primitiveDoubles = new Array[scala.Double](nElements) for (j <- 0 until nElements) primitiveDoubles(j) = buffer.get(j) @@ -679,7 +729,7 @@ object DoubleStreamImpl { spliter.characteristics() ) { def tryAdvance(action: Consumer[_ >: DoubleStream]): Boolean = { - spliter.tryAdvance((e: Double) => action.accept(mapper(e))) + spliter.tryAdvance((e: scala.Double) => action.accept(mapper(e))) } } @@ -798,7 +848,7 @@ object DoubleStreamImpl { } def concat(a: DoubleStream, b: DoubleStream): DoubleStream = { - /* See ""Design Note" at corresponding place in ObjectStreamImpl. + /* See ""Design Note" at corresponding place in StreamImpl. * This implementaton shares the same noted "features". */ val aImpl = a.asInstanceOf[DoubleStreamImpl] diff --git a/javalib/src/main/scala/java/util/stream/Stream.scala b/javalib/src/main/scala/java/util/stream/Stream.scala index 6263667e11..53be65eb4f 100644 --- a/javalib/src/main/scala/java/util/stream/Stream.scala +++ b/javalib/src/main/scala/java/util/stream/Stream.scala @@ -65,7 +65,7 @@ trait Stream[T] extends BaseStream[T, Stream[T]] { } } - new ObjectStreamImpl[T](spl, parallel = false, parent = this) + new StreamImpl[T](spl, parallel = false, parent = this) } def filter(pred: Predicate[_ >: T]): Stream[T] @@ -146,7 +146,7 @@ trait Stream[T] extends BaseStream[T, Stream[T]] { } } - (new ObjectStreamImpl[R]( + (new StreamImpl[R]( spl, parallel = false, parent = this.asInstanceOf[Stream[R]] @@ -193,7 +193,7 @@ trait Stream[T] extends BaseStream[T, Stream[T]] { } val coercedPriorStages = this - .asInstanceOf[ObjectStreamImpl[T]] + .asInstanceOf[StreamImpl[T]] .pipeline .asInstanceOf[ArrayDeque[DoubleStreamImpl]] @@ -245,8 +245,6 @@ trait Stream[T] extends BaseStream[T, Stream[T]] { def skip(n: Long): Stream[T] - def spliterator(): Spliterator[_ <: T] - def sorted(): Stream[T] def sorted(comparator: Comparator[_ >: T]): Stream[T] @@ -281,7 +279,7 @@ trait Stream[T] extends BaseStream[T, Stream[T]] { } } - new ObjectStreamImpl[T](spl, parallel = false, parent = this) + new StreamImpl[T](spl, parallel = false, parent = this) } def toArray(): Array[Object] @@ -320,13 +318,13 @@ object Stream { def build(): Stream[T] } - def builder[T](): Builder[T] = new ObjectStreamImpl.Builder[T] + def builder[T](): Builder[T] = new StreamImpl.Builder[T] def concat[T](a: Stream[_ <: T], b: Stream[_ <: T]): Stream[T] = - ObjectStreamImpl.concat(a, b) + StreamImpl.concat(a, b) def empty[T](): Stream[T] = - new ObjectStreamImpl(Spliterators.emptySpliterator[T](), parallel = false) + new StreamImpl(Spliterators.emptySpliterator[T](), parallel = false) def generate[T](s: Supplier[T]): Stream[T] = { val spliter = @@ -337,7 +335,7 @@ object Stream { } } - new ObjectStreamImpl(spliter, parallel = false) + new StreamImpl(spliter, parallel = false) } // Since: Java 9 @@ -351,7 +349,10 @@ object Stream { var seedUsed = false val spliter = - new Spliterators.AbstractSpliterator[T](Long.MaxValue, 0) { + new Spliterators.AbstractSpliterator[T]( + Long.MaxValue, + Spliterator.ORDERED | Spliterator.IMMUTABLE + ) { def tryAdvance(action: Consumer[_ >: T]): Boolean = { val current = if (seedUsed) next(previous) @@ -369,7 +370,7 @@ object Stream { } } - new ObjectStreamImpl(spliter, parallel = false) + new StreamImpl(spliter, parallel = false) } def iterate[T](seed: T, f: UnaryOperator[T]): Stream[T] = { @@ -377,7 +378,10 @@ object Stream { var seedUsed = false val spliter = - new 
Spliterators.AbstractSpliterator[T](Long.MaxValue, 0) { + new Spliterators.AbstractSpliterator[T]( + Long.MaxValue, + Spliterator.ORDERED | Spliterator.IMMUTABLE + ) { def tryAdvance(action: Consumer[_ >: T]): Boolean = { val current = if (seedUsed) f(previous) @@ -392,7 +396,7 @@ object Stream { } } - new ObjectStreamImpl(spliter, parallel = false) + new StreamImpl(spliter, parallel = false) } def of[T](values: Array[Object]): Stream[T] = { @@ -402,14 +406,14 @@ object Stream { * An implicit conversion must be missing in the javalib environment. */ - val bldr = Stream.builder[T]() - for (j <- values) - bldr.add(j.asInstanceOf[T]) - bldr.build() + Arrays.stream(values).asInstanceOf[Stream[T]] } - def of[T](t: Object): Stream[T] = - Stream.builder[T]().add(t.asInstanceOf[T]).build() + def of[T](t: Object): Stream[T] = { + val values = new Array[Object](1) + values(0) = t + Stream.of(values) + } // Since: Java 9 def ofNullable[T <: Object](t: T): Stream[T] = { diff --git a/javalib/src/main/scala/java/util/stream/ObjectStreamImpl.scala b/javalib/src/main/scala/java/util/stream/StreamImpl.scala similarity index 74% rename from javalib/src/main/scala/java/util/stream/ObjectStreamImpl.scala rename to javalib/src/main/scala/java/util/stream/StreamImpl.scala index 142be3743e..2c938220f7 100644 --- a/javalib/src/main/scala/java/util/stream/ObjectStreamImpl.scala +++ b/javalib/src/main/scala/java/util/stream/StreamImpl.scala @@ -5,8 +5,8 @@ import java.util._ import java.util.function._ import java.util.stream.Collector._ -private[stream] class ObjectStreamImpl[T]( - val pipeline: ArrayDeque[ObjectStreamImpl[T]] +private[stream] class StreamImpl[T]( + val pipeline: ArrayDeque[StreamImpl[T]] ) extends Stream[T] { var _spliterArg: Spliterator[T] = _ var _supplier: Supplier[Spliterator[T]] = _ @@ -30,7 +30,7 @@ private[stream] class ObjectStreamImpl[T]( spliterator: Spliterator[T], parallel: Boolean ) = { - this(new ArrayDeque[ObjectStreamImpl[T]]) + this(new ArrayDeque[StreamImpl[T]]) _spliterArg = spliterator _parallel = parallel } @@ -40,7 +40,7 @@ private[stream] class ObjectStreamImpl[T]( parallel: Boolean, parent: Stream[_ <: T] ) = { - this(parent.asInstanceOf[ObjectStreamImpl[T]].pipeline) + this(parent.asInstanceOf[StreamImpl[T]].pipeline) _spliterArg = spliterator _parallel = parallel } @@ -48,7 +48,7 @@ private[stream] class ObjectStreamImpl[T]( def this( spliterator: Spliterator[T], parallel: Boolean, - pipeline: ArrayDeque[ObjectStreamImpl[T]] + pipeline: ArrayDeque[StreamImpl[T]] ) = { this(pipeline) _spliterArg = spliterator @@ -60,7 +60,7 @@ private[stream] class ObjectStreamImpl[T]( characteristics: Int, parallel: Boolean ) = { - this(new ArrayDeque[ObjectStreamImpl[T]]) + this(new ArrayDeque[StreamImpl[T]]) _supplier = supplier _parallel = parallel _characteristics = characteristics @@ -76,14 +76,14 @@ private[stream] class ObjectStreamImpl[T]( protected def commenceOperation(): Unit = { if (_operatedUpon || _closed) - ObjectStreamImpl.throwIllegalStateException() + StreamImpl.throwIllegalStateException() _operatedUpon = true } def close(): Unit = { if (!_closed) { - val exceptionBuffer = new ObjectStreamImpl.CloseExceptionBuffer() + val exceptionBuffer = new StreamImpl.CloseExceptionBuffer() val it = pipeline.iterator() while (it.hasNext()) { @@ -101,7 +101,7 @@ private[stream] class ObjectStreamImpl[T]( private def closeStage(): Unit = { _closed = true - val exceptionBuffer = new ObjectStreamImpl.CloseExceptionBuffer() + val exceptionBuffer = new StreamImpl.CloseExceptionBuffer() if 
(onCloseQueueActive) { val it = onCloseQueue.iterator() @@ -128,7 +128,7 @@ private[stream] class ObjectStreamImpl[T]( // JVM appears to not set "operated upon" here. if (_closed) - ObjectStreamImpl.throwIllegalStateException() + StreamImpl.throwIllegalStateException() // detects & throws on closeHandler == null onCloseQueue.addLast(closeHandler) @@ -149,28 +149,31 @@ private[stream] class ObjectStreamImpl[T]( } def unordered(): Stream[T] = { - /* JVM has an unenforced requirment that a stream and its spliterator - * (can you say Harlan Ellison?) should have the same characteristics. - */ - val masked = _spliter.characteristics() & Spliterator.ORDERED - if (masked == Spliterator.ORDERED) this + if (masked != Spliterator.ORDERED) this // already unordered. else { commenceOperation() - // Clear ORDERED - val unordered = _spliter.characteristics() & ~(Spliterator.ORDERED) + val bitsToClear = + (Spliterator.CONCURRENT + | Spliterator.IMMUTABLE + | Spliterator.NONNULL + | Spliterator.ORDERED + | Spliterator.SIZED + | Spliterator.SUBSIZED) + + val purifiedBits = _characteristics & ~(bitsToClear) val spl = new Spliterators.AbstractSpliterator[T]( _spliter.estimateSize(), - unordered + purifiedBits ) { def tryAdvance(action: Consumer[_ >: T]): Boolean = _spliter.tryAdvance((e) => action.accept(e)) } - new ObjectStreamImpl[T](spl, _parallel, pipeline) + new StreamImpl[T](spl, _parallel, pipeline) } } @@ -283,7 +286,7 @@ private[stream] class ObjectStreamImpl[T]( } } - new ObjectStreamImpl[T](spl, _parallel, pipeline) + new StreamImpl[T](spl, _parallel, pipeline) } def filter(pred: Predicate[_ >: T]): Stream[T] = { @@ -315,7 +318,7 @@ private[stream] class ObjectStreamImpl[T]( success } } - new ObjectStreamImpl[T](spl, _parallel, pipeline) + new StreamImpl[T](spl, _parallel, pipeline) } /* delegating to findFirst() is an implementation ~~hack~~ expediency. @@ -339,16 +342,16 @@ private[stream] class ObjectStreamImpl[T]( ): Stream[R] = { commenceOperation() - val csf = new ObjectStreamImpl.CompoundSpliteratorFactory[T, R]( + val csf = new StreamImpl.CompoundSpliteratorFactory[T, R]( _spliter, mapper, closeOnFirstTouch = true ) val coercedPriorStages = pipeline - .asInstanceOf[ArrayDeque[ObjectStreamImpl[R]]] + .asInstanceOf[ArrayDeque[StreamImpl[R]]] - new ObjectStreamImpl[R](csf.get(), _parallel, coercedPriorStages) + new StreamImpl[R](csf.get(), _parallel, coercedPriorStages) } def flatMapToDouble( @@ -357,7 +360,7 @@ private[stream] class ObjectStreamImpl[T]( commenceOperation() val supplier = - new ObjectStreamImpl.DoublePrimitiveCompoundSpliteratorFactory[T]( + new StreamImpl.DoublePrimitiveCompoundSpliteratorFactory[T]( _spliter, mapper, closeOnFirstTouch = true @@ -395,6 +398,16 @@ private[stream] class ObjectStreamImpl[T]( } def limit(maxSize: Long): Stream[T] = { + + /* Important: + * See Issue #3309 for discussion of size & characteristics + * in JVM 17 (and possibly as early as JVM 12) for parallel ORDERED + * streams. The behavior implemented here is Java 8 and at least Java 11. + * + * If you are reading this block with more than a passing interest, + * prepare yourself for not having a good day, the muck is deep. 
+ */ + if (maxSize < 0) throw new IllegalArgumentException(maxSize.toString()) @@ -402,9 +415,17 @@ private[stream] class ObjectStreamImpl[T]( var nSeen = 0L + val startingBits = _spliter.characteristics() + + val alwaysClearedBits = + Spliterator.SIZED | Spliterator.SUBSIZED | + Spliterator.NONNULL | Spliterator.IMMUTABLE | Spliterator.CONCURRENT + + val newStreamCharacteristics = startingBits & ~alwaysClearedBits + val spl = new Spliterators.AbstractSpliterator[T]( - maxSize, - _spliter.characteristics() + Long.MaxValue, + newStreamCharacteristics ) { def tryAdvance(action: Consumer[_ >: T]): Boolean = if (nSeen >= maxSize) false @@ -419,7 +440,7 @@ private[stream] class ObjectStreamImpl[T]( } } - new ObjectStreamImpl[T](spl, _parallel, pipeline) + new StreamImpl[T](spl, _parallel, pipeline) } def map[R]( @@ -439,7 +460,7 @@ private[stream] class ObjectStreamImpl[T]( * Type erasure is what makes this work, once one lies to the compiler * about the types involved. */ - new ObjectStreamImpl[T]( + new StreamImpl[T]( spl.asInstanceOf[Spliterator[T]], _parallel, pipeline @@ -536,7 +557,7 @@ private[stream] class ObjectStreamImpl[T]( }) } - new ObjectStreamImpl[T](spl, _parallel, pipeline) + new StreamImpl[T](spl, _parallel, pipeline) } def reduce(accumulator: BinaryOperator[T]): Optional[T] = { @@ -594,11 +615,11 @@ private[stream] class ObjectStreamImpl[T]( && (_spliter.tryAdvance((e) => nSkipped += 1L))) { /* skip */ } // Follow JVM practice; return new stream, not remainder of "this" stream. - new ObjectStreamImpl[T](_spliter, _parallel, pipeline) + new StreamImpl[T](_spliter, _parallel, pipeline) } def sorted(): Stream[T] = { - // No commenceOperation() here. sorted(comparator) will make that happen. + // No commenceOperation() here. This is an intermediate operation. /* Be aware that this method will/should throw on first use if type * T is not Comparable[T]. This is described in the Java Stream doc. @@ -620,13 +641,76 @@ private[stream] class ObjectStreamImpl[T]( } def sorted(comparator: Comparator[_ >: T]): Stream[T] = { - commenceOperation() + // No commenceOperation() here. This is an intermediate operation. + + /* Someday figure out the types for the much cleaner 'toArray(generator)'. + * There is a bit of type nastiness/abuse going on here. + * The hidden assumption is that the type T is a subclass of Object, or at + * least AnyRef (T <: Object). However, the class declaration places + * no such restriction on T. It is T <: Any. + * + * The Ancients, in their wisdom, must have had a reason for declaring + * the type that way. + * + * I think the class declaration is _wrong_, or at least leads one to + * type abuse, such as here. However, that declaration is pretty + * hardwired at this point. Perhaps it will get corrected across + * the board for Scala Native 1.0. + * + * Until then, make the shaky assumption that the class creator is always + * specifying T <: AnyRef so that the coercion will work at + * runtime. + */ + + class SortingSpliterSupplier[T]( + srcSpliter: Spliterator[T], + comparator: Comparator[_ >: T] + ) extends Supplier[Spliterator[T]] { + + def get(): Spliterator[T] = { + val knownSize = _spliter.getExactSizeIfKnown() + + if (knownSize > Integer.MAX_VALUE) { + throw new IllegalArgumentException( + "Stream size exceeds max array size" + ) + } else { + /* Sufficiently large streams, with either known or unknown size may + * eventually throw an OutOfMemoryError exception, same as JVM. + * + * sorting streams of unknown size is likely to be _slow_.
+ */ + + val buffer = toArray() + + /* Scala 3 and 2.13.11 both allow "Arrays.sort(" here. + * Scala 2.12.18 requires "sort[Object](". + */ + Arrays + .sort[Object]( + buffer, + comparator.asInstanceOf[Comparator[_ >: Object]] + ) - val buffer = new ArrayList[T]() - _spliter.forEachRemaining((e) => buffer.add(e)) + val startingBits = _spliter.characteristics() + val alwaysSetBits = + Spliterator.SORTED | Spliterator.ORDERED | + Spliterator.SIZED | Spliterator.SUBSIZED - buffer.sort(comparator) - buffer.stream() + // Time & experience may show that additional bits need to be cleared + val alwaysClearedBits = Spliterator.IMMUTABLE + + val newCharacteristics = + (startingBits | alwaysSetBits) & ~alwaysClearedBits + + Spliterators.spliterator[T](buffer, newCharacteristics) + } + } + } + + // Do the sort in the eventual terminal operation, not now. + val spl = new SortingSpliterSupplier[T](_spliter, comparator) + new StreamImpl[T](spl, 0, _parallel) } def toArray(): Array[Object] = { @@ -649,12 +733,113 @@ private[stream] class ObjectStreamImpl[T]( } } + private class ArrayBuilder[A <: Object](generator: IntFunction[Array[A]]) { + /* The supplied generator is used to create the final Array and + * to allocate the accumulated chunks. + * + * This implementation honors the spirit but perhaps not the letter + * of the JVM description. + * + * The 'chunks' ArrayList accumulator is allocated using the 'normal' + * allocator for Java Objects. One could write a custom ArrayList + * (or other) implementation which uses the supplied generator + * to allocate & grow the accumulator. That is outside the bounds + * and resources of the current effort. + */ + + final val chunkSize = 1024 // A wild guesstimate; see what experience brings + + class ArrayChunk(val contents: Array[A]) { + var nUsed = 0 + + def add(e: A): Unit = { + /* By contract, the sole caller accept() has already checked for + * sufficient remaining size. Minimize number of index bounds checks. + */ + contents(nUsed) = e + nUsed += 1 + } + } + + var currentChunk: ArrayChunk = _ + val chunks = new ArrayList[ArrayChunk]() + + def createChunk(): Unit = { + currentChunk = new ArrayChunk(generator(chunkSize)) + chunks.add(currentChunk) + } + + createChunk() // prime the list with an initial chunk. + + def accept(e: A): Unit = { + if (currentChunk.nUsed >= chunkSize) + createChunk() + + currentChunk.add(e) + } + + def getTotalSize(): Int = { // Largest possible Array size is an Int + + // Be careful with a potentially partially filled trailing chunk. + var total = 0 + + val spliter = chunks.spliterator() + + // Be friendly to Scala 2.12 + val action: Consumer[ArrayChunk] = (e: ArrayChunk) => total += e.nUsed + + while (spliter.tryAdvance(action)) { /* side-effect */ } + + total + } + + def build(): Array[A] = { + /* Unfortunately, the chunks list is traversed twice. + * Someday fate & cleverness may bring a better algorithm. + * For now, existence & correctness bring more benefit than perfection.
+ */ + val dest = generator(getTotalSize()) + + var srcPos = 0 + + val spliter = chunks.spliterator() + + // Be friendly to Scala 2.12 + val action: Consumer[ArrayChunk] = (e: ArrayChunk) => { + val length = e.nUsed + System.arraycopy( + e.contents, + 0, + dest, + srcPos, + length + ) + + srcPos += length + } + + while (spliter.tryAdvance(action)) { /* side-effect */ } + + dest + } + } + + private def toArrayUnknownSize[A <: Object]( + generator: IntFunction[Array[A]] + ): Array[A] = { + val arrayBuilder = new ArrayBuilder[A](generator) + + _spliter.forEachRemaining((e: T) => arrayBuilder.accept(e.asInstanceOf[A])) + + arrayBuilder.build() + } + def toArray[A <: Object](generator: IntFunction[Array[A]]): Array[A] = { commenceOperation() val knownSize = _spliter.getExactSizeIfKnown() if (knownSize < 0) { - toArray().asInstanceOf[Array[A]] + toArrayUnknownSize(generator) } else { val dst = generator(knownSize.toInt) var j = 0 @@ -668,20 +853,20 @@ private[stream] class ObjectStreamImpl[T]( } -object ObjectStreamImpl { +object StreamImpl { class Builder[T] extends Stream.Builder[T] { private var built = false private val buffer = new ArrayList[T]() override def accept(t: T): Unit = - if (built) ObjectStreamImpl.throwIllegalStateException() + if (built) StreamImpl.throwIllegalStateException() else buffer.add(t) override def build(): Stream[T] = { built = true val spliter = buffer.spliterator() - new ObjectStreamImpl(spliter, parallel = false) + new StreamImpl(spliter, parallel = false) } } @@ -691,21 +876,6 @@ object ObjectStreamImpl { def add(e: Exception): Unit = buffer.addLast(e) def reportExceptions(): Unit = { - /* - val it = buffer.iterator() - - if (it.hasNext()) { - val firstException = it.next() - - while (it.hasNext()) { - val e = it.next() - if (e != firstException) - firstException.addSuppressed(e) - } - - throw (firstException) - } - */ if (!buffer.isEmpty()) { val firstException = buffer.removeFirst() @@ -757,7 +927,7 @@ object ObjectStreamImpl { private var currentSpliter: ju.Spliterator[_ <: R] = Spliterators.emptySpliterator[R]() - var currentStream = Optional.empty[ObjectStreamImpl[R]]() + var currentStream = Optional.empty[StreamImpl[R]]() def tryAdvance(action: Consumer[_ >: R]): Boolean = { var advanced = false @@ -778,7 +948,7 @@ object ObjectStreamImpl { done = !substreams .tryAdvance((e) => currentSpliter = { - val eOfR = e.asInstanceOf[ObjectStreamImpl[R]] + val eOfR = e.asInstanceOf[StreamImpl[R]] currentStream = Optional.of(eOfR) /* Tricky bit here! @@ -902,8 +1072,8 @@ object ObjectStreamImpl { * until the bug reports start pouring in. 
*/ - val aImpl = a.asInstanceOf[ObjectStreamImpl[T]] - val bImpl = b.asInstanceOf[ObjectStreamImpl[T]] + val aImpl = a.asInstanceOf[StreamImpl[T]] + val bImpl = b.asInstanceOf[StreamImpl[T]] aImpl.commenceOperation() bImpl.commenceOperation() @@ -920,10 +1090,10 @@ object ObjectStreamImpl { val pipelineA = aImpl.pipeline val pipelineB = bImpl.pipeline - val pipelines = new ArrayDeque[ObjectStreamImpl[T]](pipelineA) + val pipelines = new ArrayDeque[StreamImpl[T]](pipelineA) pipelines.addAll(pipelineB) - new ObjectStreamImpl[T](csf.get(), parallel = false, pipelines) + new StreamImpl[T](csf.get(), parallel = false, pipelines) } def throwIllegalStateException(): Unit = { diff --git a/javalib/src/main/scala/java/util/stream/StreamSupport.scala b/javalib/src/main/scala/java/util/stream/StreamSupport.scala index a25d28067b..94481a929d 100644 --- a/javalib/src/main/scala/java/util/stream/StreamSupport.scala +++ b/javalib/src/main/scala/java/util/stream/StreamSupport.scala @@ -61,7 +61,7 @@ object StreamSupport { spliterator: Spliterator[T], parallel: Boolean ): Stream[T] = { - new ObjectStreamImpl[T](spliterator, parallel) + new StreamImpl[T](spliterator, parallel) } def stream[T]( @@ -69,6 +69,6 @@ object StreamSupport { characteristics: Int, parallel: Boolean ): Stream[T] = { - new ObjectStreamImpl[T](supplier, characteristics, parallel) + new StreamImpl[T](supplier, characteristics, parallel) } } diff --git a/junit-test/outputs/scala/scalanative/junit/AssertEqualsDoubleTestAssertions_.txt b/junit-test/outputs/scala/scalanative/junit/AssertEqualsDoubleTestAssertions_.txt index b9b41bdec5..818d8df0ef 100644 --- a/junit-test/outputs/scala/scalanative/junit/AssertEqualsDoubleTestAssertions_.txt +++ b/junit-test/outputs/scala/scalanative/junit/AssertEqualsDoubleTestAssertions_.txt @@ -1,12 +1,10 @@ ldTest run started ldTest scala.scalanative.junit.AssertEqualsDoubleTest.failsWithDouble started -leTest scala.scalanative.junit.AssertEqualsDoubleTest.failsWithDouble failed: Use assertEquals(expected, actual, delta) to compare floating-point numbers, took