From 818d672e1b5f3153beb1c3bc656c077c7d1f58d8 Mon Sep 17 00:00:00 2001
From: Shivam Mishra <124146945+shmishra99@users.noreply.github.com>
Date: Tue, 6 Feb 2024 00:11:41 +0530
Subject: [PATCH 01/33] Fixed typos in documentation string. (#7782)
* Migrate stale management probot to GitHub Actions
* Fixed typos in documentation string.
---------
Co-authored-by: Matthew Soulanille
Co-authored-by: Linchenn <40653845+Linchenn@users.noreply.github.com>
Co-authored-by: Ping Yu <4018+pyu10055@users.noreply.github.com>
---
tfjs-converter/src/executor/graph_executor.ts | 2 +-
tfjs-converter/src/executor/graph_model.ts | 2 +-
tfjs-converter/src/executor/tensor_array.ts | 2 +-
.../src/operations/executors/hash_table_executor_test.ts | 2 +-
tfjs-tfdf/BUILD.bazel | 2 +-
tfjs-tfdf/README.md | 2 +-
tfjs-vis/demos/mnist/index.html | 4 ++--
tfjs-vis/src/components/surface.tsx | 2 +-
tfjs-vis/src/components/visor.tsx | 2 +-
tfjs-vis/src/types.ts | 4 ++--
10 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/tfjs-converter/src/executor/graph_executor.ts b/tfjs-converter/src/executor/graph_executor.ts
index 52f7d481e8a..da34b1d692c 100644
--- a/tfjs-converter/src/executor/graph_executor.ts
+++ b/tfjs-converter/src/executor/graph_executor.ts
@@ -452,7 +452,7 @@ export class GraphExecutor implements FunctionExecutor {
* @param isFunctionExecution Optional. Flag for executing a function.
* @param tensorArrayMap Optional, global TensorArray map by id. Used for
* function execution.
- * @param tensorArrayMap Optinal global TensorList map by id. Used for
+ * @param tensorArrayMap Optional global TensorList map by id. Used for
* function execution.
*/
private async _executeAsync(
diff --git a/tfjs-converter/src/executor/graph_model.ts b/tfjs-converter/src/executor/graph_model.ts
index 95334467706..70a26719a65 100644
--- a/tfjs-converter/src/executor/graph_model.ts
+++ b/tfjs-converter/src/executor/graph_model.ts
@@ -319,7 +319,7 @@ export class GraphModel implements
* Execute the inference for the input tensors.
*
   * @param input The input tensors, when there is a single input for the model,
- * inputs param should be a `tf.Tensor`. For models with mutliple inputs,
+ * inputs param should be a `tf.Tensor`. For models with multiple inputs,
* inputs params should be in either `tf.Tensor`[] if the input order is
* fixed, or otherwise NamedTensorMap format.
*
diff --git a/tfjs-converter/src/executor/tensor_array.ts b/tfjs-converter/src/executor/tensor_array.ts
index b69553ffb48..21344cbc7c3 100644
--- a/tfjs-converter/src/executor/tensor_array.ts
+++ b/tfjs-converter/src/executor/tensor_array.ts
@@ -240,7 +240,7 @@ export class TensorArray {
/**
* Scatter the values of a Tensor in specific indices of a TensorArray.
- * @param indices nummber[] values in [0, max_value). If the
+ * @param indices number[] values in [0, max_value). If the
* TensorArray is not dynamic, max_value=size().
* @param tensor Tensor input tensor.
*/
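For context on the corrected doc comment: `scatter` distributes the input tensor along its first axis, writing slice `i` to position `indices[i]` of the TensorArray, with every index in `[0, max_value)`. A minimal NumPy sketch of that contract (the `scatter` helper below is a hypothetical stand-in, not the tfjs-converter implementation):

```python
import numpy as np

def scatter(size, indices, tensor):
    """Slice i of `tensor` (along axis 0) lands at slot indices[i]."""
    assert len(indices) == tensor.shape[0]
    assert all(0 <= i < size for i in indices)
    slots = [None] * size
    for i, idx in enumerate(indices):
        slots[idx] = tensor[i]
    return slots

ta = scatter(4, [0, 2], np.ones((2, 3)))  # slots 0 and 2 hold (3,) rows
```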
diff --git a/tfjs-converter/src/operations/executors/hash_table_executor_test.ts b/tfjs-converter/src/operations/executors/hash_table_executor_test.ts
index 64cc3d12ed6..1d4b1972f78 100644
--- a/tfjs-converter/src/operations/executors/hash_table_executor_test.ts
+++ b/tfjs-converter/src/operations/executors/hash_table_executor_test.ts
@@ -262,7 +262,7 @@ describe('hash_table', () => {
const before = memory().numTensors;
try {
await executeOp(node, {input3, input5}, context, resourceManager);
- fail('Shoudl fail, succeed unexpectedly.');
+ fail('Should fail, succeed unexpectedly.');
} catch (err) {
expect(err).toMatch(/Expect key dtype/);
}
diff --git a/tfjs-tfdf/BUILD.bazel b/tfjs-tfdf/BUILD.bazel
index b133797af4d..b9f65832065 100644
--- a/tfjs-tfdf/BUILD.bazel
+++ b/tfjs-tfdf/BUILD.bazel
@@ -48,7 +48,7 @@ tfjs_bundle(
],
)
-# Copy ouput files to dist/.
+# Copy output files to dist/.
copy_ts_library_to_dist(
name = "copy_src_to_dist",
srcs = [
diff --git a/tfjs-tfdf/README.md b/tfjs-tfdf/README.md
index 81d196f44d6..7096864fb29 100644
--- a/tfjs-tfdf/README.md
+++ b/tfjs-tfdf/README.md
@@ -1,6 +1,6 @@
# Tensorflow Decision Forests support for Tensorflow.js
-This package enables users to run arbitary Tensorflow Decision Forests models
+This package enables users to run arbitrary Tensorflow Decision Forests models
on the web that are converted using tfjs-converter.
Users can load a TFDF model from a URL, use TFJS tensors to set
the model's input data, run inference, and get the output back in TFJS tensors.
diff --git a/tfjs-vis/demos/mnist/index.html b/tfjs-vis/demos/mnist/index.html
index 6ecf19ea990..b51a164746b 100644
--- a/tfjs-vis/demos/mnist/index.html
+++ b/tfjs-vis/demos/mnist/index.html
@@ -94,7 +94,7 @@ The Visor
` (backtick): Shows or hides the visor
- ~ (tilde, shift+backtick): Toggles betweeen the two sizes the visor supports
+ ~ (tilde, shift+backtick): Toggles between the two sizes the visor supports
The API allows you to disable (unbind) these keyboard shortcuts.
@@ -317,7 +317,7 @@ Customizing training charts.
Evaluating Our Model
- Now that our model is trained we should evalute its performance. For a classification task like this one we can
+ Now that our model is trained we should evaluate its performance. For a classification task like this one we can
use the `perClassAccuracy`
and `confusionMatrix` functions. These are demonstrated below.
diff --git a/tfjs-vis/src/components/surface.tsx b/tfjs-vis/src/components/surface.tsx
index 6eb67bd5f62..56ed114692c 100644
--- a/tfjs-vis/src/components/surface.tsx
+++ b/tfjs-vis/src/components/surface.tsx
@@ -27,7 +27,7 @@ interface SurfaceProps extends SurfaceInfoStrict {
}
/**
- * A surface is container for visualizations and other rendered thigns.
+ * A surface is a container for visualizations and other rendered things.
* It consists of a containing DOM Element, a label and an empty drawArea.
*/
export class SurfaceComponent extends Component<SurfaceProps> {
diff --git a/tfjs-vis/src/components/visor.tsx b/tfjs-vis/src/components/visor.tsx
index d9df73665f7..4a7ff16e85f 100644
--- a/tfjs-vis/src/components/visor.tsx
+++ b/tfjs-vis/src/components/visor.tsx
@@ -28,7 +28,7 @@ interface VisorProps {
// objects that allow configuration of the surface. The actual surface
// instance is managed by the visor.
surfaceList: SurfaceInfoStrict[];
- // Whether to inialize the visor to the open or closed state. Optional.
+ // Whether to initialize the visor to the open or closed state. Optional.
startOpen?: boolean;
// A ref handler
ref?: (r: VisorComponent) => void;
diff --git a/tfjs-vis/src/types.ts b/tfjs-vis/src/types.ts
index c06a47d4b34..7af39c38f41 100644
--- a/tfjs-vis/src/types.ts
+++ b/tfjs-vis/src/types.ts
@@ -141,11 +141,11 @@ export interface VisOptions {
*/
export interface XYPlotOptions extends VisOptions {
/**
- * domain of the x axis. Overriden by zoomToFit
+ * domain of the x axis. Overridden by zoomToFit
*/
xAxisDomain?: [number, number];
/**
- * domain of the y axis. Overriden by zoomToFit
+ * domain of the y axis. Overridden by zoomToFit
*/
yAxisDomain?: [number, number];
/**
From f0f981fe306bf548e300536aca485c0ffdd6619e Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Tue, 27 Feb 2024 23:52:08 +0530
Subject: [PATCH 02/33] Address typos in scripts documentation strings (#8185)
DOC
---
scripts/cloud_funcs/README.md | 14 +++++++++-----
scripts/generate_cloudbuild.ts | 2 +-
scripts/make-version.js | 6 +++---
scripts/publish-npm.ts | 2 +-
scripts/release-tfjs.ts | 4 ++--
scripts/release-util.ts | 2 +-
scripts/release.ts | 4 ++--
scripts/start_local_debugger_server.js | 2 +-
8 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/scripts/cloud_funcs/README.md b/scripts/cloud_funcs/README.md
index 359f3cc48b5..1b510fdbde4 100644
--- a/scripts/cloud_funcs/README.md
+++ b/scripts/cloud_funcs/README.md
@@ -1,10 +1,12 @@
This directory contains the following Google Cloud Functions.
### `trigger_nightly`
-Programatically triggers a Cloud Build on master. This function is called by the Cloud Scheduler around 4am "America/New York" time every day (configurable via the Cloud Scheduler UI).
+
+Programmatically triggers a Cloud Build on master. This function is called by the Cloud Scheduler around 4am "America/New York" time every day (configurable via the Cloud Scheduler UI).
You can also trigger the function manually via the Cloud UI.
Command to re-deploy:
+
```sh
gcloud functions deploy nightly_tfjs \
--runtime nodejs14 \
@@ -15,6 +17,7 @@ If a build was triggered by nightly, there is a substitution variable `_NIGHTLY=
You can forward the substitution as the `NIGHTLY` environment variable so the scripts can use it, by specifying `env: ['NIGHTLY=$_NIGHTLY']` in `cloudbuild.yml`. E.g. `integration_tests/benchmarks/benchmark_cloud.sh` uses the `NIGHTLY` bit to always run on nightly.
### `send_email`
+
Sends an email and a chat message with the nightly build status. Every build sends a message to the `cloud-builds` topic with its build information. The `send_email` function is subscribed to that topic and ignores all builds (e.g. builds triggered by pull requests) **except** for the nightly build and sends an email to an internal mailing list with its build status around 4:40am.
Command to re-deploy:
@@ -28,6 +31,7 @@ gcloud functions deploy send_email \
```
### `sync_reactnative`
+
Makes a request to BrowserStack to sync the current build of the tfjs-react-native integration app to BrowserStack. The app itself is stored in a GCP bucket. This needs to be done at least once every 30 days and is triggered by Cloud Scheduler via the `sync_reactnative` topic.
Currently set to run weekly on Thursdays at 3AM.
@@ -44,7 +48,7 @@ gcloud functions deploy sync_reactnative \
The pipeline looks like this:
-1) At 4am, Cloud Scheduler writes to `nightly_tfjs` topic
-2) That triggers the `nightly_tfjs` function, which starts a build programatically
-3) That build runs and writes its status to `cloud-builds` topic
-4) That triggers the `send_email` function, which sends email and chat with the build status.
+1. At 4am, Cloud Scheduler writes to `nightly_tfjs` topic
+2. That triggers the `nightly_tfjs` function, which starts a build programmatically
+3. That build runs and writes its status to `cloud-builds` topic
+4. That triggers the `send_email` function, which sends email and chat with the build status.
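The `_NIGHTLY` forwarding described in this README is what lets scripts such as `integration_tests/benchmarks/benchmark_cloud.sh` branch on nightly runs. A minimal sketch of that pattern, written as a hypothetical Python build step (the repo's actual consumers are shell scripts):

```python
import os

def run_full_benchmarks():  # hypothetical placeholder step
    print('running nightly-only benchmarks')

def run_smoke_tests():  # hypothetical placeholder step
    print('running quick presubmit checks')

# cloudbuild.yml forwards the _NIGHTLY substitution via
# env: ['NIGHTLY=$_NIGHTLY'], so nightly builds see NIGHTLY=true.
if os.environ.get('NIGHTLY') == 'true':
    run_full_benchmarks()
else:
    run_smoke_tests()
```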
diff --git a/scripts/generate_cloudbuild.ts b/scripts/generate_cloudbuild.ts
index d9450cf8bfb..0bce012deff 100644
--- a/scripts/generate_cloudbuild.ts
+++ b/scripts/generate_cloudbuild.ts
@@ -33,7 +33,7 @@ interface CloudbuildStep {
const CUSTOM_PROPS = new Set(['nightlyOnly', 'waitedForByPackages']);
interface CustomCloudbuildStep extends CloudbuildStep {
nightlyOnly?: boolean; // Only run during nightly tests
- waitedForByPackages?: boolean; // Other non-bazel pacakges `waitFor` this step
+ waitedForByPackages?: boolean; // Other non-bazel packages `waitFor` this step
}
function removeCustomProps(step: CustomCloudbuildStep): CloudbuildStep {
diff --git a/scripts/make-version.js b/scripts/make-version.js
index 77ac3458ec5..91766146849 100755
--- a/scripts/make-version.js
+++ b/scripts/make-version.js
@@ -43,7 +43,7 @@ fs.writeFile(path.join(dirName, 'src/version.ts'), versionCode, err => {
if (err) {
throw new Error(`Could not save version file ${version}: ${err}`);
}
- console.log(`Version file for version ${version} saved sucessfully.`);
+ console.log(`Version file for version ${version} saved successfully.`);
});
if (dirName === 'tfjs-converter') {
@@ -60,7 +60,7 @@ version = '${version}'
throw new Error(`Could not save pip version file ${version}: ${err}`);
}
console.log(
- `Version file for pip version ${version} saved sucessfully.`);
+ `Version file for pip version ${version} saved successfully.`);
});
const buildFilename = path.join(dirName, '/python/BUILD.bazel');
@@ -74,6 +74,6 @@ version = '${version}'
fs.writeFileSync(buildFilename, newValue, 'utf-8');
console.log(
- `pip version ${version} for BUILD.bazel file is updated sucessfully.`);
+ `pip version ${version} for BUILD.bazel file is updated successfully.`);
});
}
diff --git a/scripts/publish-npm.ts b/scripts/publish-npm.ts
index d62f78b0029..4aaa696c5a8 100755
--- a/scripts/publish-npm.ts
+++ b/scripts/publish-npm.ts
@@ -77,7 +77,7 @@ function setDifference<T>(a: Set<T>, b: Set<T>): Set<T> {
const parser = new argparse.ArgumentParser();
parser.addArgument('--git-protocol', {
action: 'storeTrue',
- help: 'Use the git protocal rather than the http protocol when cloning repos.'
+ help: 'Use the git protocol rather than the http protocol when cloning repos.'
});
parser.addArgument('--registry', {
diff --git a/scripts/release-tfjs.ts b/scripts/release-tfjs.ts
index c4c37251f33..7975501b1fe 100644
--- a/scripts/release-tfjs.ts
+++ b/scripts/release-tfjs.ts
@@ -225,7 +225,7 @@ async function main() {
fs.writeFileSync(packageJsonPath, pkg);
// Update dependency versions of all package.json files found in the
- // package to use the new verison numbers (except ones in node_modules).
+ // package to use the new version numbers (except ones in node_modules).
const subpackages =
$(`find ${
packagePath} -name package.json -not -path \'*/node_modules/*\'`)
@@ -289,7 +289,7 @@ async function main() {
'YARN_REGISTRY="https://registry.npmjs.org/" yarn publish-npm ' +
'after you merge the PR.' +
'Remember to delete the dev branch once PR is merged.' +
- 'Please remeber to update the website once you have released ' +
+ 'Please remember to update the website once you have released ' +
'a new package version.');
if (args.dry) {
diff --git a/scripts/release-util.ts b/scripts/release-util.ts
index 5e6107fb260..9e03078063e 100755
--- a/scripts/release-util.ts
+++ b/scripts/release-util.ts
@@ -415,7 +415,7 @@ export async function getReleaseBranch(name: string): Promise<string> {
Array.from(branchesStr.split(/\n/)).map(line => line.toString().trim());
// Find the latest matching branch, e.g. tfjs_1.7.1
- // It will not match temprary generated branches such as tfjs_1.7.1_phase0.
+ // It will not match temporary generated branches such as tfjs_1.7.1_phase0.
const exp = '^' + name + '_([^_]+)$';
const regObj = new RegExp(exp);
const maybeBranch = branches.find(branch => branch.match(regObj));
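The anchored pattern `'^' + name + '_([^_]+)$'` admits exactly one underscore-free suffix after the package name, which is why `tfjs_1.7.1` matches while the temporary `tfjs_1.7.1_phase0` branches are excluded. A quick Python sketch of the same regex (the release script itself is TypeScript; this only illustrates the match):

```python
import re

name = 'tfjs'
exp = re.compile('^' + name + '_([^_]+)$')

assert exp.match('tfjs_1.7.1')             # release branch: matches
assert not exp.match('tfjs_1.7.1_phase0')  # phase branch: excluded
```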
diff --git a/scripts/release.ts b/scripts/release.ts
index c0aa9e8fb22..0644db310a0 100644
--- a/scripts/release.ts
+++ b/scripts/release.ts
@@ -38,7 +38,7 @@ const parser = new argparse.ArgumentParser();
parser.addArgument('--git-protocol', {
action: 'storeTrue',
- help: 'Use the git protocal rather than the http protocol when cloning repos.'
+ help: 'Use the git protocol rather than the http protocol when cloning repos.'
});
async function main() {
@@ -174,7 +174,7 @@ async function main() {
`Please publish by running ` +
`YARN_REGISTRY="https://registry.npmjs.org/" yarn publish-npm ` +
`after you merge the PR.` +
- `Please remeber to update the website once you have released ` +
+ `Please remember to update the website once you have released ` +
'a new package version');
process.exit(0);
diff --git a/scripts/start_local_debugger_server.js b/scripts/start_local_debugger_server.js
index 17ad7c9ab06..1f0c6e05753 100644
--- a/scripts/start_local_debugger_server.js
+++ b/scripts/start_local_debugger_server.js
@@ -118,7 +118,7 @@ parser.addArgument('--port', {
});
parser.addArgument('--version', {
- help: `The verison of the bundle. Default: ${DEFAULT_VERSION}`,
+ help: `The version of the bundle. Default: ${DEFAULT_VERSION}`,
defaultValue: DEFAULT_VERSION,
type: 'string',
});
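The scripts in this patch use the npm `argparse` package, a port of Python's argparse, so the corrected help strings map directly onto the Python API. A minimal Python sketch of the same `--version` flag with a default (`DEFAULT_VERSION` here is a hypothetical placeholder value):

```python
import argparse

DEFAULT_VERSION = 'latest'  # hypothetical default for illustration

parser = argparse.ArgumentParser()
parser.add_argument(
    '--version',
    help=f'The version of the bundle. Default: {DEFAULT_VERSION}',
    default=DEFAULT_VERSION,
    type=str,
)

args = parser.parse_args(['--version', '4.17.0'])
print(args.version)  # -> 4.17.0
```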
From c465681739aa2927975dd8d0becd3d17e21cebbb Mon Sep 17 00:00:00 2001
From: Ping Yu <4018+pyu10055@users.noreply.github.com>
Date: Sat, 6 Apr 2024 00:56:02 -0700
Subject: [PATCH 03/33] nightly test fix (#8214)
* testing nightly fix
* disable mobilenet test
* fix converter and e2e tests
---
e2e/integration_tests/convert_predict.py | 29 +-
remote-execution/BUILD.bazel | 2 +-
tfjs-converter/python/BUILD.bazel | 3 +-
.../python/requirements-dev_lock.txt | 274 ++++++++++--------
tfjs-converter/python/requirements.txt | 5 +-
tfjs-converter/python/requirements_lock.txt | 274 ++++++++++--------
.../python/tensorflowjs/BUILD.bazel | 6 +
.../tensorflowjs/converters/BUILD.bazel | 9 +
.../tensorflowjs/converters/converter.py | 25 +-
.../tensorflowjs/converters/converter_test.py | 121 ++++----
.../converters/fuse_depthwise_conv2d_test.py | 23 +-
.../converters/fuse_prelu_test.py | 33 ++-
.../converters/generate_test_model.py | 7 +-
.../converters/keras_h5_conversion_test.py | 112 +++----
.../converters/keras_tfjs_loader.py | 9 +-
.../converters/keras_tfjs_loader_test.py | 83 +++---
.../tf_saved_model_conversion_v2_test.py | 139 ++-------
.../python/tensorflowjs/converters/wizard.py | 2 +-
.../tensorflowjs/converters/wizard_test.py | 19 +-
.../python/test_nightly_pip_package.py | 5 +-
tfjs-converter/python/test_pip_package.py | 67 ++---
21 files changed, 611 insertions(+), 636 deletions(-)
diff --git a/e2e/integration_tests/convert_predict.py b/e2e/integration_tests/convert_predict.py
index a44b96aaaa4..84667789b7e 100644
--- a/e2e/integration_tests/convert_predict.py
+++ b/e2e/integration_tests/convert_predict.py
@@ -37,6 +37,7 @@
import numpy as np
import tensorflow as tf
+import tf_keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@@ -232,16 +233,16 @@ def _create_saved_model_with_conv2d(save_dir):
save_dir: directory name of where the saved model will be stored.
"""
layers = [
- tf.keras.layers.Conv2D(
+ tf_keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=False),
- tf.keras.layers.BatchNormalization(),
- tf.keras.layers.ReLU()
+ tf_keras.layers.BatchNormalization(),
+ tf_keras.layers.ReLU()
]
- model = tf.keras.Sequential(layers)
+ model = tf_keras.Sequential(layers)
result = model.predict(tf.ones((1, 24, 24, 3)))
  # set the learning phase to avoid keras learning placeholder, which
  # will cause an error when saving.
- tf.keras.backend.set_learning_phase(0)
+ #tf_keras.backend.set_learning_phase(0)
tf.saved_model.save(model, save_dir)
return {
"async": False,
@@ -263,14 +264,14 @@ def _create_saved_model_with_prelu(save_dir):
  # set the bias and alpha initializers to make them constant and ensure grappler
# be able to fuse the op.
layers = [
- tf.keras.layers.Conv2D(
+ tf_keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=True,
bias_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
+ tf_keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
]
- model = tf.keras.Sequential(layers)
+ model = tf_keras.Sequential(layers)
result = model.predict(tf.ones((1, 24, 24, 3)))
- tf.keras.backend.set_learning_phase(0)
+ #tf_keras.backend.set_learning_phase(0)
tf.saved_model.save(model, save_dir)
return {
"async": False,
@@ -351,13 +352,13 @@ def _create_saved_model_v2_with_tensorlist_ops(save_dir):
Args:
save_dir: directory name of where the saved model will be stored.
"""
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Embedding(100, 20, input_shape=[10]))
- model.add(tf.keras.layers.GRU(4))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Embedding(100, 20, input_shape=[10]))
+ model.add(tf_keras.layers.GRU(4))
result = model.predict(tf.ones([1, 10]))
- tf.keras.backend.set_learning_phase(0)
+ #tf_keras.backend.set_learning_phase(0)
tf.saved_model.save(model, save_dir)
return {
@@ -469,7 +470,7 @@ def lookup(input):
}
def _layers_mobilenet():
- model = tf.keras.applications.MobileNetV2()
+ model = tf_keras.applications.MobileNetV2()
model_path = 'mobilenet'
tfjs.converters.save_keras_model(model, os.path.join(
_tmp_dir, model_path))
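The `convert_predict.py` changes above swap `tf.keras` for the standalone `tf_keras` package: TensorFlow 2.16 ships Keras 3 as `tf.keras`, while these test models still need the Keras 2 API that the `tf-keras` pip package provides. A minimal sketch of the migrated pattern, assuming `tensorflow>=2.16` and `tf-keras` are installed:

```python
import tensorflow as tf
import tf_keras  # Keras 2 API, from the tf-keras pip package

# Same shape as the patched test helpers: build with tf_keras,
# save with tf.saved_model.save.
model = tf_keras.Sequential([
    tf_keras.layers.Conv2D(16, [3, 3], padding='same', use_bias=False),
    tf_keras.layers.BatchNormalization(),
    tf_keras.layers.ReLU(),
])
model.predict(tf.ones((1, 24, 24, 3)))  # builds the model's variables
tf.saved_model.save(model, '/tmp/conv2d_saved_model')
```

Saving through `tf.saved_model.save` rather than `model.save` keeps the output a plain SavedModel, which is the format the converter's `tf_saved_model` input path consumes.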
diff --git a/remote-execution/BUILD.bazel b/remote-execution/BUILD.bazel
index 21703436a1a..1fadc5d84d5 100755
--- a/remote-execution/BUILD.bazel
+++ b/remote-execution/BUILD.bazel
@@ -9,7 +9,7 @@ platform(
],
exec_properties = {
# We use the same docker image for remote builds as we use for CI testing.
- "container-image": "docker://gcr.io/learnjs-174218/release@sha256:d85abab6146eaf1e01312bdb9e353a5efa0508b913dccf30fc5e505d009026ff",
+ "container-image": "docker://gcr.io/learnjs-174218/release:latest@sha256:f712eae902a364750727f1bc2e4bfc3f75be846e2277f4e8026f9c03752f00e4",
# By default in Google Cloud Remote build execution, network access is disabled. We explicitly set the
# property in the platform again in case the default ever changes. Network access is not desirable in
# Bazel builds as it is potential source of flaky tests and therefore also breaks hermeticity.
diff --git a/tfjs-converter/python/BUILD.bazel b/tfjs-converter/python/BUILD.bazel
index 3df31fc7512..e670451ea1e 100644
--- a/tfjs-converter/python/BUILD.bazel
+++ b/tfjs-converter/python/BUILD.bazel
@@ -71,9 +71,10 @@ py_wheel(
"jax>=0.4.13",
"jaxlib>=0.4.13",
"tensorflow>=2.13.0,<3",
+ "tf-keras>=2.13.0",
"tensorflow-decision-forests>=1.5.0",
"six>=1.16.0,<2",
- "tensorflow-hub>=0.14.0",
+ "tensorflow-hub>=0.16.1",
"packaging~=23.1",
],
strip_path_prefixes = [
diff --git a/tfjs-converter/python/requirements-dev_lock.txt b/tfjs-converter/python/requirements-dev_lock.txt
index ea498bca0c5..e1d387023ff 100644
--- a/tfjs-converter/python/requirements-dev_lock.txt
+++ b/tfjs-converter/python/requirements-dev_lock.txt
@@ -9,6 +9,7 @@ absl-py==1.3.0 \
--hash=sha256:463c38a08d2e4cef6c498b76ba5bd4858e4c6ef51da1a5a1f27139a022e20248
# via
# chex
+ # keras
# optax
# orbax-checkpoint
# tensorboard
@@ -26,10 +27,6 @@ cached-property==1.5.2 \
--hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
--hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0
# via orbax-checkpoint
-cachetools==5.2.0 \
- --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
- --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
- # via google-auth
certifi==2022.12.7 \
--hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
--hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
@@ -106,16 +103,6 @@ gast==0.4.0 \
--hash=sha256:40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1 \
--hash=sha256:b7adcdd5adbebf1adf17378da5ba3f543684dbec47b1cda1f3997e573cd542c4
# via tensorflow
-google-auth==2.15.0 \
- --hash=sha256:6897b93556d8d807ad70701bb89f000183aea366ca7ed94680828b37437a4994 \
- --hash=sha256:72f12a6cfc968d754d7bdab369c5c5c16032106e52d32c6dfd8484e4c01a6d1f
- # via
- # google-auth-oauthlib
- # tensorboard
-google-auth-oauthlib==1.0.0 \
- --hash=sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb \
- --hash=sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5
- # via tensorboard
google-pasta==0.2.0 \
--hash=sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954 \
--hash=sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed \
@@ -170,28 +157,35 @@ grpcio==1.51.1 \
# via
# tensorboard
# tensorflow
-h5py==3.7.0 \
- --hash=sha256:03d64fb86bb86b978928bad923b64419a23e836499ec6363e305ad28afd9d287 \
- --hash=sha256:04e2e1e2fc51b8873e972a08d2f89625ef999b1f2d276199011af57bb9fc7851 \
- --hash=sha256:0798a9c0ff45f17d0192e4d7114d734cac9f8b2b2c76dd1d923c4d0923f27bb6 \
- --hash=sha256:0a047fddbe6951bce40e9cde63373c838a978c5e05a011a682db9ba6334b8e85 \
- --hash=sha256:0d8de8cb619fc597da7cf8cdcbf3b7ff8c5f6db836568afc7dc16d21f59b2b49 \
- --hash=sha256:1fcb11a2dc8eb7ddcae08afd8fae02ba10467753a857fa07a404d700a93f3d53 \
- --hash=sha256:3fcf37884383c5da64846ab510190720027dca0768def34dd8dcb659dbe5cbf3 \
- --hash=sha256:43fed4d13743cf02798a9a03a360a88e589d81285e72b83f47d37bb64ed44881 \
- --hash=sha256:63beb8b7b47d0896c50de6efb9a1eaa81dbe211f3767e7dd7db159cea51ba37a \
- --hash=sha256:6776d896fb90c5938de8acb925e057e2f9f28755f67ec3edcbc8344832616c38 \
- --hash=sha256:9e2ad2aa000f5b1e73b5dfe22f358ca46bf1a2b6ca394d9659874d7fc251731a \
- --hash=sha256:9e7535df5ee3dc3e5d1f408fdfc0b33b46bc9b34db82743c82cd674d8239b9ad \
- --hash=sha256:a9351d729ea754db36d175098361b920573fdad334125f86ac1dd3a083355e20 \
- --hash=sha256:c038399ce09a58ff8d89ec3e62f00aa7cb82d14f34e24735b920e2a811a3a426 \
- --hash=sha256:d77af42cb751ad6cc44f11bae73075a07429a5cf2094dfde2b1e716e059b3911 \
- --hash=sha256:e5b7820b75f9519499d76cc708e27242ccfdd9dfb511d6deb98701961d0445aa \
- --hash=sha256:ed43e2cc4f511756fd664fb45d6b66c3cbed4e3bd0f70e29c37809b2ae013c44 \
- --hash=sha256:f084bbe816907dfe59006756f8f2d16d352faff2d107f4ffeb1d8de126fc5dc7 \
- --hash=sha256:f514b24cacdd983e61f8d371edac8c1b780c279d0acb8485639e97339c866073 \
- --hash=sha256:f73307c876af49aa869ec5df1818e9bb0bdcfcf8a5ba773cc45a4fba5a286a5c
- # via tensorflow
+h5py==3.10.0 \
+ --hash=sha256:012ab448590e3c4f5a8dd0f3533255bc57f80629bf7c5054cf4c87b30085063c \
+ --hash=sha256:212bb997a91e6a895ce5e2f365ba764debeaef5d2dca5c6fb7098d66607adf99 \
+ --hash=sha256:2381e98af081b6df7f6db300cd88f88e740649d77736e4b53db522d8874bf2dc \
+ --hash=sha256:2c8e4fda19eb769e9a678592e67eaec3a2f069f7570c82d2da909c077aa94339 \
+ --hash=sha256:3074ec45d3dc6e178c6f96834cf8108bf4a60ccb5ab044e16909580352010a97 \
+ --hash=sha256:3c97d03f87f215e7759a354460fb4b0d0f27001450b18b23e556e7856a0b21c3 \
+ --hash=sha256:43a61b2c2ad65b1fabc28802d133eed34debcc2c8b420cb213d3d4ef4d3e2229 \
+ --hash=sha256:492305a074327e8d2513011fa9fffeb54ecb28a04ca4c4227d7e1e9616d35641 \
+ --hash=sha256:5dfc65ac21fa2f630323c92453cadbe8d4f504726ec42f6a56cf80c2f90d6c52 \
+ --hash=sha256:667fe23ab33d5a8a6b77970b229e14ae3bb84e4ea3382cc08567a02e1499eedd \
+ --hash=sha256:6c013d2e79c00f28ffd0cc24e68665ea03ae9069e167087b2adb5727d2736a52 \
+ --hash=sha256:781a24263c1270a62cd67be59f293e62b76acfcc207afa6384961762bb88ea03 \
+ --hash=sha256:86df4c2de68257b8539a18646ceccdcf2c1ce6b1768ada16c8dcfb489eafae20 \
+ --hash=sha256:90286b79abd085e4e65e07c1bd7ee65a0f15818ea107f44b175d2dfe1a4674b7 \
+ --hash=sha256:92273ce69ae4983dadb898fd4d3bea5eb90820df953b401282ee69ad648df684 \
+ --hash=sha256:93dd840bd675787fc0b016f7a05fc6efe37312a08849d9dd4053fd0377b1357f \
+ --hash=sha256:9450464b458cca2c86252b624279115dcaa7260a40d3cb1594bf2b410a2bd1a3 \
+ --hash=sha256:ae2f0201c950059676455daf92700eeb57dcf5caaf71b9e1328e6e6593601770 \
+ --hash=sha256:aece0e2e1ed2aab076c41802e50a0c3e5ef8816d60ece39107d68717d4559824 \
+ --hash=sha256:b963fb772964fc1d1563c57e4e2e874022ce11f75ddc6df1a626f42bd49ab99f \
+ --hash=sha256:ba9ab36be991119a3ff32d0c7cbe5faf9b8d2375b5278b2aea64effbeba66039 \
+ --hash=sha256:d4682b94fd36ab217352be438abd44c8f357c5449b8995e63886b431d260f3d3 \
+ --hash=sha256:d93adc48ceeb33347eb24a634fb787efc7ae4644e6ea4ba733d099605045c049 \
+ --hash=sha256:f42e6c30698b520f0295d70157c4e202a9e402406f50dc08f5a7bc416b24e52d \
+ --hash=sha256:fd6f6d1384a9f491732cee233b99cd4bfd6e838a8815cc86722f9d2ee64032af
+ # via
+ # keras
+ # tensorflow
idna==3.4 \
--hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
--hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
@@ -247,9 +241,9 @@ jaxlib==0.4.23 \
# chex
# optax
# orbax-checkpoint
-keras==2.13.1 \
- --hash=sha256:5ce5f706f779fa7330e63632f327b75ce38144a120376b2ae1917c00fa6136af \
- --hash=sha256:5df12cc241a015a11b65ddb452c0eeb2744fce21d9b54ba48db87492568ccc68
+keras==3.1.1 \
+ --hash=sha256:55558ea228dc38e7667874fd2e83eaf7faeb026e2e8615b36a8616830f7e303b \
+ --hash=sha256:b5d45f0b5116b11db502da00bd501592364325d01724e6cb2032711e3e32677e
# via tensorflow
lazy-object-proxy==1.8.0 \
--hash=sha256:0c1c7c0433154bb7c54185714c6929acc0ba04ee1b167314a779b9025517eada \
@@ -334,27 +328,29 @@ mccabe==0.6.1 \
--hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
--hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
# via pylint
-ml-dtypes==0.2.0 \
- --hash=sha256:022d5a4ee6be14569c2a9d1549e16f1ec87ca949681d0dca59995445d5fcdd5b \
- --hash=sha256:1749b60348da71fd3c2ab303fdbc1965958dc50775ead41f5669c932a341cafd \
- --hash=sha256:32107e7fa9f62db9a5281de923861325211dfff87bd23faefb27b303314635ab \
- --hash=sha256:35b984cddbe8173b545a0e3334fe56ea1a5c3eb67c507f60d0cfde1d3fa8f8c2 \
- --hash=sha256:36d28b8861a8931695e5a31176cad5ae85f6504906650dea5598fbec06c94606 \
- --hash=sha256:50845af3e9a601810751b55091dee6c2562403fa1cb4e0123675cf3a4fc2c17a \
- --hash=sha256:6488eb642acaaf08d8020f6de0a38acee7ac324c1e6e92ee0c0fea42422cb797 \
- --hash=sha256:75015818a7fccf99a5e8ed18720cb430f3e71a8838388840f4cdf225c036c983 \
- --hash=sha256:80d304c836d73f10605c58ccf7789c171cc229bfb678748adfb7cea2510dfd0e \
- --hash=sha256:832a019a1b6db5c4422032ca9940a990fa104eee420f643713241b3a518977fa \
- --hash=sha256:8faaf0897942c8253dd126662776ba45f0a5861968cf0f06d6d465f8a7bc298a \
- --hash=sha256:bc29a0524ef5e23a7fbb8d881bdecabeb3fc1d19d9db61785d077a86cb94fab2 \
- --hash=sha256:df6a76e1c8adf484feb138ed323f9f40a7b6c21788f120f7c78bec20ac37ee81 \
- --hash=sha256:e70047ec2c83eaee01afdfdabee2c5b0c133804d90d0f7db4dd903360fcc537c \
- --hash=sha256:e85ba8e24cf48d456e564688e981cf379d4c8e644db0a2f719b78de281bac2ca \
- --hash=sha256:f00c71c8c63e03aff313bc6a7aeaac9a4f1483a921a6ffefa6d4404efd1af3d0 \
- --hash=sha256:f08c391c2794f2aad358e6f4c70785a9a7b1df980ef4c232b3ccd4f6fe39f719
+ml-dtypes==0.3.2 \
+ --hash=sha256:2c34f2ba9660b21fe1034b608308a01be82bbef2a92fb8199f24dc6bad0d5226 \
+ --hash=sha256:3a17ef2322e60858d93584e9c52a5be7dd6236b056b7fa1ec57f1bb6ba043e33 \
+ --hash=sha256:533059bc5f1764fac071ef54598db358c167c51a718f68f5bb55e3dee79d2967 \
+ --hash=sha256:6604877d567a29bfe7cc02969ae0f2425260e5335505cf5e7fefc3e5465f5655 \
+ --hash=sha256:6b35c4e8ca957c877ac35c79ffa77724ecc3702a1e4b18b08306c03feae597bb \
+ --hash=sha256:763697ab8a88d47443997a7cdf3aac7340049aed45f7521f6b0ec8a0594821fe \
+ --hash=sha256:7a4c3fcbf86fa52d0204f07cfd23947ef05b4ad743a1a988e163caa34a201e5e \
+ --hash=sha256:7afde548890a92b41c0fed3a6c525f1200a5727205f73dc21181a2726571bb53 \
+ --hash=sha256:7ba8e1fafc7fff3e643f453bffa7d082df1678a73286ce8187d3e825e776eb94 \
+ --hash=sha256:91f8783fd1f2c23fd3b9ee5ad66b785dafa58ba3cdb050c4458021fa4d1eb226 \
+ --hash=sha256:93b78f53431c93953f7850bb1b925a17f0ab5d97527e38a7e865b5b4bc5cfc18 \
+ --hash=sha256:961134ea44c7b8ca63eda902a44b58cd8bd670e21d62e255c81fba0a8e70d9b7 \
+ --hash=sha256:b89b194e9501a92d289c1ffd411380baf5daafb9818109a4f49b0a1b6dce4462 \
+ --hash=sha256:c7b3fb3d4f6b39bcd4f6c4b98f406291f0d681a895490ee29a0f95bab850d53c \
+ --hash=sha256:d1a746fe5fb9cd974a91070174258f0be129c592b93f9ce7df6cc336416c3fbd \
+ --hash=sha256:e8505946df1665db01332d885c2020b4cb9e84a8b1241eb4ba69d59591f65855 \
+ --hash=sha256:f47619d978ab1ae7dfdc4052ea97c636c6263e1f19bd1be0e42c346b98d15ff4
# via
# jax
# jaxlib
+ # keras
+ # tensorflow
msgpack==1.0.4 \
--hash=sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467 \
--hash=sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae \
@@ -411,6 +407,10 @@ msgpack==1.0.4 \
# via
# flax
# orbax-checkpoint
+namex==0.0.7 \
+ --hash=sha256:84ba65bc4d22bd909e3d26bf2ffb4b9529b608cb3f9a4336f776b04204ced69b \
+ --hash=sha256:8a4f062945f405d77cb66b907f16aa2fd83681945e998be840eb6c4154d40108
+ # via keras
nest-asyncio==1.5.7 \
--hash=sha256:5301c82941b550b3123a1ea772ba9a1c80bad3a182be8c1a5ae6ad3be57a9657 \
--hash=sha256:6a80f7b98f24d9083ed24608977c09dd608d83f91cccc24c9d2cba6d10e01c10
@@ -450,6 +450,7 @@ numpy==1.23.5 \
# h5py
# jax
# jaxlib
+ # keras
# ml-dtypes
# opt-einsum
# optax
@@ -461,10 +462,6 @@ numpy==1.23.5 \
# tensorflow-decision-forests
# tensorflow-hub
# tensorstore
-oauthlib==3.2.2 \
- --hash=sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca \
- --hash=sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918
- # via requests-oauthlib
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549
@@ -475,6 +472,47 @@ optax==0.1.4 \
--hash=sha256:12fcf33bd682f9a162a3deb097f864130c3224d76771af2ba09410de80399a9b \
--hash=sha256:fb7a0550d57a6636164a3de25986a8a19be8ff6431fcdf1225b4e05175810f22
# via flax
+optree==0.11.0 \
+ --hash=sha256:00a63f10d4a476e8e9aa2988daba9b2e88cb369c5aacc12545957d7d00bcd1a7 \
+ --hash=sha256:0db6968394096223881053dffdcaf2b8e220fd85db904f14aa931e4dc422c046 \
+ --hash=sha256:0df9a3923725aabb112ec7f10c74fa96b6c640da1cd30e7bc62fd4b03ef02875 \
+ --hash=sha256:162ed3ff2eb3f1c358e131e72c025f2b93d69b906e9057a811d014032ec71dc8 \
+ --hash=sha256:228b97e8c991739b10c8548c118747ba32ee765f88236342e492bf9648afc0bc \
+ --hash=sha256:234a4f8f97a1217f13390df7ac416771689749d9a1c8eda31bf8622cd333219e \
+ --hash=sha256:26b1230f9b75b579923a4f837c7c13db8b8d815cf68ce5af31dda5d818a877b2 \
+ --hash=sha256:2b3bb59324d635f2015bb3e237fd772b1fd548eee6cc80e008fbe0f092e9228d \
+ --hash=sha256:2bc08fb9691f43afc3a01119dead6b823ce3d7239e42fc3e47d4028eed50a6a2 \
+ --hash=sha256:31d444684ebd8c9f09a3d806fb3277843138ef9952b7a2954908e440e3b22519 \
+ --hash=sha256:39bed744a61e2f795e172d2853779ac59b8dea236982dc160ea22063afc99ca3 \
+ --hash=sha256:3cdc9fac9888d9eff11128ccfc4d4c10309163e372f312f7942ecee8df3d7824 \
+ --hash=sha256:4144126dd3c2ece2d2dd1d5e0b39fb91adf1c46f660c2c5a2df7f80666989d5d \
+ --hash=sha256:418850ceff364f51a6d81f32a1efd06a4e2d8df79a162e892685bc20c0aedd72 \
+ --hash=sha256:5e250144eacdd5813dec0b18d91df0229197e3be402db42fd8e254ec90ea343d \
+ --hash=sha256:5e5df0e8aaca124cc1ffca311786cc909810f3c046de090729cdafbf910082f8 \
+ --hash=sha256:63e020a34b7168b5d0701a265c7c95b07984ff699d4894b20fa601282be88f20 \
+ --hash=sha256:64c2e00fe508f50a42c50838df0d1f5be0dce5b4bef2373db8ad72b860211015 \
+ --hash=sha256:6a406eee5acd3fd4875fa44c3972d29ae6d4329e7296e9219986fe6ff8e92ea0 \
+ --hash=sha256:6cdd625dab2dff5374ff9c6792e8702fced8f0ea713ce959fc8f95499b5ecb2f \
+ --hash=sha256:6e8c3757088cd7fce666f2a5e031b65d7898e210452380d2657c0fc0a7ec9932 \
+ --hash=sha256:738e8bf4158e9c11cd051d89c2e453aeacf80ff8719ebc3251069015646554d0 \
+ --hash=sha256:8e6a46e95c3ea8546055087d6fe52a1dcd56de5182365f1469106cc72cdf3307 \
+ --hash=sha256:979ffc2b96f16595c219fb7a89597dd2fa00ac47a3b411fdcf8ae6821da52290 \
+ --hash=sha256:9bf322ad14f907ad4660ca286e731e750546d54934a94cc5ba7efe8860c60ab4 \
+ --hash=sha256:9d9d644e5448db9f32e2497487aca3bb2d3f92cbb50429a411ccda3f1f0968f3 \
+ --hash=sha256:a5f37bcfe4e363e3bb8d36c5698fb829546956b2fe88951994387162a1859625 \
+ --hash=sha256:a64df43fce2d8eeafd7db6e27447c56b3fa64842df847819684b3b1cc254c016 \
+ --hash=sha256:a91840f9d45e7c01f151ba1815ae32b4c3c21e4290298772ee4b13314f729856 \
+ --hash=sha256:b201a9405e250cf5770955863af2a236e382bdf5e4e086897ff03c41418c39da \
+ --hash=sha256:b26ac807d8993b7e43081b4b7bbb0378b4e5f3e6525daf923c470bc176cc3327 \
+ --hash=sha256:b8126d81ecb2c9e3554420834014ba343251f564c905ee3bef09d205b924b0c0 \
+ --hash=sha256:b9d236bc1491a5e366921b95fecc05aa6ff55989a81f2242cd11121b82c24503 \
+ --hash=sha256:bc17f9d085cd75a2de4f299a9c5e3c3520138eac7596061e581230b03862b44d \
+ --hash=sha256:d666099a78f7bf31bf3a520d6871ddcae65484bcff095fc4271a391553b09c75 \
+ --hash=sha256:e2d47bd28eff690eb2f7432e490265a291b04d6d346cf7b586491b2e2337bf97 \
+ --hash=sha256:ee208f0bec6436085a9fa3ae98af54bfcb8822086894fc1ade283e80a6f11fd7 \
+ --hash=sha256:f53951bfb640417558568284a8949d67bcdbf21fa0113107e20bd9403aa20b2b \
+ --hash=sha256:fa9ed745d4cbac5e15df70339b30867ba033542b87f7b734f4cacae5ec73ba00
+ # via keras
orbax-checkpoint==0.2.3 \
--hash=sha256:155e0a2dceef2901122e66585171e1dff4f4a4d9d2abe43a2b514279b9a3dabd \
--hash=sha256:a001bf48f1cebc635b07263fa546473ea48be3e278c50d5ade880b9aafb96f8a
@@ -537,16 +575,6 @@ protobuf==4.22.3 \
# tensorboard
# tensorflow
# tensorflow-hub
-pyasn1==0.4.8 \
- --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \
- --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba
- # via
- # pyasn1-modules
- # rsa
-pyasn1-modules==0.2.8 \
- --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \
- --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74
- # via google-auth
pygments==2.13.0 \
--hash=sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1 \
--hash=sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42
@@ -705,21 +733,13 @@ regex==2022.10.31 \
requests==2.28.1 \
--hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \
--hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349
- # via
- # requests-oauthlib
- # tensorboard
-requests-oauthlib==1.3.1 \
- --hash=sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5 \
- --hash=sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a
- # via google-auth-oauthlib
+ # via tensorflow
rich==12.6.0 \
--hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \
--hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0
- # via flax
-rsa==4.9 \
- --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \
- --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21
- # via google-auth
+ # via
+ # flax
+ # keras
scipy==1.9.3 \
--hash=sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31 \
--hash=sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108 \
@@ -758,63 +778,58 @@ six==1.16.0 \
# via
# -r tfjs-converter/python/requirements.txt
# astunparse
- # google-auth
# google-pasta
# prompt-toolkit
# python-dateutil
+ # tensorboard
# tensorflow
# tensorflow-decision-forests
-tensorboard==2.13.0 \
- --hash=sha256:ab69961ebddbddc83f5fa2ff9233572bdad5b883778c35e4fe94bf1798bd8481
+tensorboard==2.16.2 \
+ --hash=sha256:9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45
# via tensorflow
tensorboard-data-server==0.7.0 \
--hash=sha256:64aa1be7c23e80b1a42c13b686eb0875bb70f5e755f4d2b8de5c1d880cf2267f \
--hash=sha256:753d4214799b31da7b6d93837959abebbc6afa86e69eacf1e9a317a48daa31eb \
--hash=sha256:eb7fa518737944dbf4f0cf83c2e40a7ac346bf91be2e6a0215de98be74e85454
# via tensorboard
-tensorflow==2.13.0 \
- --hash=sha256:00060c5516a61e30c51936084ebc37091d116efe9ae74b2818cbd8b2006218e7 \
- --hash=sha256:06559eeaa69e6561cccbe2d02b015bcec663e875c8bbc4643f55692476e52147 \
- --hash=sha256:076d953a1508dc58bf95f30f58bcc9ee364b1353c61e143cb20c2dada91afb05 \
- --hash=sha256:11ad6a7ff49b4a690fbf37a5adaf28ba2686350a859c5f13c58dc8d2cc670375 \
- --hash=sha256:19ee67901702b26787ad685cca65730c163c101c0c2f238a2584d714e0fa8c25 \
- --hash=sha256:2822ac48c38d69b7fb104e606dacbd763c4bf5d3b20791f25be16a5076d01912 \
- --hash=sha256:5e0fdadec59de3d11c5b5129ddc38e739bde7aa13095b82e19d4380e14d04999 \
- --hash=sha256:6fff426661d286a4c634da44275d2ea2b951f392f3e65c8603681e7cb040586a \
- --hash=sha256:72d68b8c2f382e2d01b956c8ba516c0a7d5dad98111dd351bf82bfa646aa1c72 \
- --hash=sha256:7a08c0e2938ed5b642a8787678123827477b81d316055d5073fff82fa183eb82 \
- --hash=sha256:89125443e998548059c4e4a129dfab2b1ff7f2fd4c8eaed97842c3cd9b663101 \
- --hash=sha256:948003b5a23b72b3d89746d729e62ec5f01e47460f05521b2211d95069f569ba \
- --hash=sha256:9c04bc3023b6c4cfb9ee9759c3f03f21993891b4c345df52eb5519204fbf28c0 \
- --hash=sha256:b2978b39e8b3919059b5fd9e28508d50a77965d06ed0b537ed71c97de22dabdf \
- --hash=sha256:cbb83561bb7d55859eaefc70c674e58713d4e10c10927423ed836a5289bbfa86 \
- --hash=sha256:de77306c0c22c9d8754f54700752ac3a1efee895c5357308e6594436404bfbc0 \
- --hash=sha256:e0cf94d36ceaba8f158c6e15404a81fd5b3aa4cb04147c674cf55bd1aec78154 \
- --hash=sha256:e8f0b69ee2f800399fc6bc7ec55fecfa33662d136e425485959d90638f32a32a \
- --hash=sha256:fa7abe265cc3ebccc9b405a280bf674824c6d85df5e6ccfa985987b3c9d265b4 \
- --hash=sha256:fb2ff1129c93e853c19897d6a22ed0ec56387f5c6290ec03dec1c6f7b80bc396
+tensorflow==2.16.1 \
+ --hash=sha256:03b946e73bf48d857928329b8b321b00b42fe1b4f774c6580666683b0629689f \
+ --hash=sha256:093573a8eb93ef9511e7015b8de9659ed27156f2f05e6d1211f8f4cb76407ee1 \
+ --hash=sha256:09cac3c6a8fbf85a9b95491b58086154dd00a09956ed31823bb45c6605f0e881 \
+ --hash=sha256:1c5611e7357b7a4bc6dccc60750c91e27cdff82622fc917848f22add5ab8de26 \
+ --hash=sha256:1e96047657c64459a36a0cc211a3d003df96c7be3f95a84f7b705715f5697270 \
+ --hash=sha256:21a3c6d76a39f52754c389326f6bef8aef3c26b5bc89ca365add4a69483e569e \
+ --hash=sha256:42858b5d14159a2b9cc01c7f5a88e063b0601f20430cb358374005a67da38114 \
+ --hash=sha256:4a123fbb5788ba30d1113ce01bb166ddf85056fcb40e287c32a929ebfa4aa061 \
+ --hash=sha256:617df9fa2d697c4bc22fa3ee87eb01d580ab1bd0438fea15c4ec2f2870c40bb0 \
+ --hash=sha256:8231a9d7bba92a51231dcdcc3073920ad7d22fa88c64c7e2ecb7f1feac9d5fcb \
+ --hash=sha256:8e376ab46fb1df18a1f927d77011d36ecf7b717a81cbfe4a941c7bf5236939b3 \
+ --hash=sha256:92152aa77c402684e9066885515af6a45d88455c4453a818052c7369357078d8 \
+ --hash=sha256:930c61100cce3a5cb63d30fe6776504405214e8398a26ca968222ecb8b8f9404 \
+ --hash=sha256:ab79f156dd746c2dae906e3b4c5daac3855742941752e5a2c28f094c56eed466 \
+ --hash=sha256:ae0554471d472b8095f8a5204d878389d0d4bc88f6ef6edcd477b952dff5cfab \
+ --hash=sha256:bbf06d879070dfce2617c7d2bb19696bb1b2bcbb3b4ae009520e7166dd75dfc2 \
+ --hash=sha256:c612cdd436bb55b8dae1ecdd1d253496c95b006870b7165b8480c6606b8622aa \
+ --hash=sha256:cc2065d1d27f9f89fea8a0fe8fdf6c437ae60987cd7f2928e0d00e532e79e44d \
+ --hash=sha256:e9cf3fba7f389ff8b8342c5fbebb2529321e0ce9e03d7bcb3657ee0876686c36 \
+ --hash=sha256:f8a5b83ca4bf1813da158f63479cfdf848c0761e5120258417b3a96074a489f5
# via
# -r tfjs-converter/python/requirements.txt
# tensorflow-decision-forests
-tensorflow-decision-forests==1.5.0 \
- --hash=sha256:04fd913627d08fe54514b179c612e87eebf55f1448bf01951660985dfa14a6e1 \
- --hash=sha256:1209a2832ac65f8f74bd9d0c1d58f3f8b771e7fa5c9d504c547842311647b7d4 \
- --hash=sha256:22e3835acbfbd5356bb2f8e0c973dfc40ef80a7924b793e90c811158448cfe77 \
- --hash=sha256:43ffd4fba1c3376f58a9dcee943df80f0cff6e47224d109ad0389a723c74947c \
- --hash=sha256:4a0df3a3be5751594d49f5a8f99977b553cf1c42f320b952ac2a2f67b85283f5 \
- --hash=sha256:5ec4297eb5e7c4110cf8aae89e9b08b9ad2cb725e3e63c89c78304c0d7235d24 \
- --hash=sha256:804f6bed277b5c5b6d2bd85738a64973d5d3e8e6ac06abf6098545740245cedc \
- --hash=sha256:a43af2a5a8c34e550bf549c6cad96da271979efc5a8ec988f6f76cc90770415a \
- --hash=sha256:d137241dad8e884d0c937aa8769fe0768324804e9ba666a78b7b5f2f536a0bd2 \
- --hash=sha256:d685e92abe44920ee6d89394ec4e075bb1ada7402f673566146e1b476a576e96 \
- --hash=sha256:f5d8c3730578bda55a8f520ae39b0c9b2560d69bd53b57882e5371c1a82ba098 \
- --hash=sha256:fbd403acf736bb9b4afd2985d9056e6d5043fc4b9a31bd05e5fcae2b1d413dc3
+ # tf-keras
+tensorflow-decision-forests==1.9.0 \
+ --hash=sha256:54d9bb6040fb7698860a23f38ec8a5ce4c2d162f7a54ce82b1b13cf353bac31a \
+ --hash=sha256:688d522d4de7f8e868f068df383d6cfe7f898cba60811f325f470c784ce365e2 \
+ --hash=sha256:7868b1ad4054b14d3f45635fb7eab73495a25900ea4cf12fecc140c3c2004909 \
+ --hash=sha256:942d0501ed95ef2964d1fdb4196b34b75794cc19276770c169de8d4638efa350 \
+ --hash=sha256:baafff33647e87565b8e93bff92f3bace89e4efb5cfd2aceff1a05de52ab3d16 \
+ --hash=sha256:bbc76e92c693114037e5380fcc11201d260e7290f30a56daf23306e0103dd9bb \
+ --hash=sha256:bf85a2d292bcce59d31518f102baa6b8c42d40e73dd5b667d4df83564b2b01dd \
+ --hash=sha256:c5fe3b8fca3579f9342995a85f1c66b8c3524d002ff6cab92d90b557a79715ef \
+ --hash=sha256:f24a830e9d0c3283579ce8406009580ab9295371a014001511963be7c19f8b07
# via -r tfjs-converter/python/requirements.txt
-tensorflow-estimator==2.13.0 \
- --hash=sha256:6f868284eaa654ae3aa7cacdbef2175d0909df9fcf11374f5166f8bf475952aa
- # via tensorflow
-tensorflow-hub==0.14.0 \
- --hash=sha256:519c6b56c4d304667fbd8ce66bd637e6a750c901215468db2cc6bfd0739bb0b0
+tensorflow-hub==0.16.1 \
+ --hash=sha256:e10c184b3d08daeafada11ffea2dd46781725b6bef01fad1f74d6634ad05311f
# via -r tfjs-converter/python/requirements.txt
tensorflow-io-gcs-filesystem==0.34.0 \
--hash=sha256:027a07553367187f918a99661f63ae0506b91b77a70bee9c7ccaf3920bf7cfe7 \
@@ -862,6 +877,13 @@ termcolor==2.1.1 \
--hash=sha256:67cee2009adc6449c650f6bcf3bdeed00c8ba53a8cda5362733c53e0a39fb70b \
--hash=sha256:fa852e957f97252205e105dd55bbc23b419a70fec0085708fc0515e399f304fd
# via tensorflow
+tf-keras==2.16.0 \
+ --hash=sha256:b2ad0541fa7d9e92c4b7a1b96593377afb58aaff374299a6ca6be1a42f51d899 \
+ --hash=sha256:db53891f1ac98197c2acced98cdca8c06ba8255655a6cb7eb95ed49676118280
+ # via
+ # -r tfjs-converter/python/requirements.txt
+ # tensorflow-decision-forests
+ # tensorflow-hub
toml==0.10.2 \
--hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
--hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
@@ -876,6 +898,7 @@ typing-extensions==4.4.0 \
# via
# flax
# optax
+ # optree
# orbax-checkpoint
# tensorflow
urllib3==1.26.13 \
@@ -895,7 +918,6 @@ wheel==0.38.4 \
--hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8
# via
# astunparse
- # tensorboard
# tensorflow-decision-forests
wrapt==1.12.1 \
--hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
diff --git a/tfjs-converter/python/requirements.txt b/tfjs-converter/python/requirements.txt
index 88d66d0d3b0..4a2c3cb337f 100644
--- a/tfjs-converter/python/requirements.txt
+++ b/tfjs-converter/python/requirements.txt
@@ -3,7 +3,8 @@ importlib_resources>=5.9.0
jax>=0.4.23
jaxlib>=0.4.23
tensorflow>=2.13.0,<3
-tensorflow-decision-forests>=1.5.0
+tf-keras>=2.16.0
+tensorflow-decision-forests>=1.9.0
six>=1.16.0,<2
-tensorflow-hub>=0.14.0
+tensorflow-hub>=0.16.1
packaging~=23.1
diff --git a/tfjs-converter/python/requirements_lock.txt b/tfjs-converter/python/requirements_lock.txt
index ddb45f9a10c..369b7c71589 100644
--- a/tfjs-converter/python/requirements_lock.txt
+++ b/tfjs-converter/python/requirements_lock.txt
@@ -9,6 +9,7 @@ absl-py==1.3.0 \
--hash=sha256:463c38a08d2e4cef6c498b76ba5bd4858e4c6ef51da1a5a1f27139a022e20248
# via
# chex
+ # keras
# optax
# orbax-checkpoint
# tensorboard
@@ -22,10 +23,6 @@ cached-property==1.5.2 \
--hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
--hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0
# via orbax-checkpoint
-cachetools==5.2.0 \
- --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
- --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
- # via google-auth
certifi==2022.12.7 \
--hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
--hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
@@ -86,16 +83,6 @@ gast==0.4.0 \
--hash=sha256:40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1 \
--hash=sha256:b7adcdd5adbebf1adf17378da5ba3f543684dbec47b1cda1f3997e573cd542c4
# via tensorflow
-google-auth==2.15.0 \
- --hash=sha256:6897b93556d8d807ad70701bb89f000183aea366ca7ed94680828b37437a4994 \
- --hash=sha256:72f12a6cfc968d754d7bdab369c5c5c16032106e52d32c6dfd8484e4c01a6d1f
- # via
- # google-auth-oauthlib
- # tensorboard
-google-auth-oauthlib==1.0.0 \
- --hash=sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb \
- --hash=sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5
- # via tensorboard
google-pasta==0.2.0 \
--hash=sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954 \
--hash=sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed \
@@ -150,28 +137,35 @@ grpcio==1.51.1 \
# via
# tensorboard
# tensorflow
-h5py==3.7.0 \
- --hash=sha256:03d64fb86bb86b978928bad923b64419a23e836499ec6363e305ad28afd9d287 \
- --hash=sha256:04e2e1e2fc51b8873e972a08d2f89625ef999b1f2d276199011af57bb9fc7851 \
- --hash=sha256:0798a9c0ff45f17d0192e4d7114d734cac9f8b2b2c76dd1d923c4d0923f27bb6 \
- --hash=sha256:0a047fddbe6951bce40e9cde63373c838a978c5e05a011a682db9ba6334b8e85 \
- --hash=sha256:0d8de8cb619fc597da7cf8cdcbf3b7ff8c5f6db836568afc7dc16d21f59b2b49 \
- --hash=sha256:1fcb11a2dc8eb7ddcae08afd8fae02ba10467753a857fa07a404d700a93f3d53 \
- --hash=sha256:3fcf37884383c5da64846ab510190720027dca0768def34dd8dcb659dbe5cbf3 \
- --hash=sha256:43fed4d13743cf02798a9a03a360a88e589d81285e72b83f47d37bb64ed44881 \
- --hash=sha256:63beb8b7b47d0896c50de6efb9a1eaa81dbe211f3767e7dd7db159cea51ba37a \
- --hash=sha256:6776d896fb90c5938de8acb925e057e2f9f28755f67ec3edcbc8344832616c38 \
- --hash=sha256:9e2ad2aa000f5b1e73b5dfe22f358ca46bf1a2b6ca394d9659874d7fc251731a \
- --hash=sha256:9e7535df5ee3dc3e5d1f408fdfc0b33b46bc9b34db82743c82cd674d8239b9ad \
- --hash=sha256:a9351d729ea754db36d175098361b920573fdad334125f86ac1dd3a083355e20 \
- --hash=sha256:c038399ce09a58ff8d89ec3e62f00aa7cb82d14f34e24735b920e2a811a3a426 \
- --hash=sha256:d77af42cb751ad6cc44f11bae73075a07429a5cf2094dfde2b1e716e059b3911 \
- --hash=sha256:e5b7820b75f9519499d76cc708e27242ccfdd9dfb511d6deb98701961d0445aa \
- --hash=sha256:ed43e2cc4f511756fd664fb45d6b66c3cbed4e3bd0f70e29c37809b2ae013c44 \
- --hash=sha256:f084bbe816907dfe59006756f8f2d16d352faff2d107f4ffeb1d8de126fc5dc7 \
- --hash=sha256:f514b24cacdd983e61f8d371edac8c1b780c279d0acb8485639e97339c866073 \
- --hash=sha256:f73307c876af49aa869ec5df1818e9bb0bdcfcf8a5ba773cc45a4fba5a286a5c
- # via tensorflow
+h5py==3.10.0 \
+ --hash=sha256:012ab448590e3c4f5a8dd0f3533255bc57f80629bf7c5054cf4c87b30085063c \
+ --hash=sha256:212bb997a91e6a895ce5e2f365ba764debeaef5d2dca5c6fb7098d66607adf99 \
+ --hash=sha256:2381e98af081b6df7f6db300cd88f88e740649d77736e4b53db522d8874bf2dc \
+ --hash=sha256:2c8e4fda19eb769e9a678592e67eaec3a2f069f7570c82d2da909c077aa94339 \
+ --hash=sha256:3074ec45d3dc6e178c6f96834cf8108bf4a60ccb5ab044e16909580352010a97 \
+ --hash=sha256:3c97d03f87f215e7759a354460fb4b0d0f27001450b18b23e556e7856a0b21c3 \
+ --hash=sha256:43a61b2c2ad65b1fabc28802d133eed34debcc2c8b420cb213d3d4ef4d3e2229 \
+ --hash=sha256:492305a074327e8d2513011fa9fffeb54ecb28a04ca4c4227d7e1e9616d35641 \
+ --hash=sha256:5dfc65ac21fa2f630323c92453cadbe8d4f504726ec42f6a56cf80c2f90d6c52 \
+ --hash=sha256:667fe23ab33d5a8a6b77970b229e14ae3bb84e4ea3382cc08567a02e1499eedd \
+ --hash=sha256:6c013d2e79c00f28ffd0cc24e68665ea03ae9069e167087b2adb5727d2736a52 \
+ --hash=sha256:781a24263c1270a62cd67be59f293e62b76acfcc207afa6384961762bb88ea03 \
+ --hash=sha256:86df4c2de68257b8539a18646ceccdcf2c1ce6b1768ada16c8dcfb489eafae20 \
+ --hash=sha256:90286b79abd085e4e65e07c1bd7ee65a0f15818ea107f44b175d2dfe1a4674b7 \
+ --hash=sha256:92273ce69ae4983dadb898fd4d3bea5eb90820df953b401282ee69ad648df684 \
+ --hash=sha256:93dd840bd675787fc0b016f7a05fc6efe37312a08849d9dd4053fd0377b1357f \
+ --hash=sha256:9450464b458cca2c86252b624279115dcaa7260a40d3cb1594bf2b410a2bd1a3 \
+ --hash=sha256:ae2f0201c950059676455daf92700eeb57dcf5caaf71b9e1328e6e6593601770 \
+ --hash=sha256:aece0e2e1ed2aab076c41802e50a0c3e5ef8816d60ece39107d68717d4559824 \
+ --hash=sha256:b963fb772964fc1d1563c57e4e2e874022ce11f75ddc6df1a626f42bd49ab99f \
+ --hash=sha256:ba9ab36be991119a3ff32d0c7cbe5faf9b8d2375b5278b2aea64effbeba66039 \
+ --hash=sha256:d4682b94fd36ab217352be438abd44c8f357c5449b8995e63886b431d260f3d3 \
+ --hash=sha256:d93adc48ceeb33347eb24a634fb787efc7ae4644e6ea4ba733d099605045c049 \
+ --hash=sha256:f42e6c30698b520f0295d70157c4e202a9e402406f50dc08f5a7bc416b24e52d \
+ --hash=sha256:fd6f6d1384a9f491732cee233b99cd4bfd6e838a8815cc86722f9d2ee64032af
+ # via
+ # keras
+ # tensorflow
idna==3.4 \
--hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
--hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
@@ -223,9 +217,9 @@ jaxlib==0.4.23 \
# chex
# optax
# orbax-checkpoint
-keras==2.13.1 \
- --hash=sha256:5ce5f706f779fa7330e63632f327b75ce38144a120376b2ae1917c00fa6136af \
- --hash=sha256:5df12cc241a015a11b65ddb452c0eeb2744fce21d9b54ba48db87492568ccc68
+keras==3.1.1 \
+ --hash=sha256:55558ea228dc38e7667874fd2e83eaf7faeb026e2e8615b36a8616830f7e303b \
+ --hash=sha256:b5d45f0b5116b11db502da00bd501592364325d01724e6cb2032711e3e32677e
# via tensorflow
libclang==14.0.6 \
--hash=sha256:206d2789e4450a37d054e63b70451a6fc1873466397443fa13de2b3d4adb2796 \
@@ -285,27 +279,29 @@ markupsafe==2.1.1 \
--hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \
--hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7
# via werkzeug
-ml-dtypes==0.2.0 \
- --hash=sha256:022d5a4ee6be14569c2a9d1549e16f1ec87ca949681d0dca59995445d5fcdd5b \
- --hash=sha256:1749b60348da71fd3c2ab303fdbc1965958dc50775ead41f5669c932a341cafd \
- --hash=sha256:32107e7fa9f62db9a5281de923861325211dfff87bd23faefb27b303314635ab \
- --hash=sha256:35b984cddbe8173b545a0e3334fe56ea1a5c3eb67c507f60d0cfde1d3fa8f8c2 \
- --hash=sha256:36d28b8861a8931695e5a31176cad5ae85f6504906650dea5598fbec06c94606 \
- --hash=sha256:50845af3e9a601810751b55091dee6c2562403fa1cb4e0123675cf3a4fc2c17a \
- --hash=sha256:6488eb642acaaf08d8020f6de0a38acee7ac324c1e6e92ee0c0fea42422cb797 \
- --hash=sha256:75015818a7fccf99a5e8ed18720cb430f3e71a8838388840f4cdf225c036c983 \
- --hash=sha256:80d304c836d73f10605c58ccf7789c171cc229bfb678748adfb7cea2510dfd0e \
- --hash=sha256:832a019a1b6db5c4422032ca9940a990fa104eee420f643713241b3a518977fa \
- --hash=sha256:8faaf0897942c8253dd126662776ba45f0a5861968cf0f06d6d465f8a7bc298a \
- --hash=sha256:bc29a0524ef5e23a7fbb8d881bdecabeb3fc1d19d9db61785d077a86cb94fab2 \
- --hash=sha256:df6a76e1c8adf484feb138ed323f9f40a7b6c21788f120f7c78bec20ac37ee81 \
- --hash=sha256:e70047ec2c83eaee01afdfdabee2c5b0c133804d90d0f7db4dd903360fcc537c \
- --hash=sha256:e85ba8e24cf48d456e564688e981cf379d4c8e644db0a2f719b78de281bac2ca \
- --hash=sha256:f00c71c8c63e03aff313bc6a7aeaac9a4f1483a921a6ffefa6d4404efd1af3d0 \
- --hash=sha256:f08c391c2794f2aad358e6f4c70785a9a7b1df980ef4c232b3ccd4f6fe39f719
+ml-dtypes==0.3.2 \
+ --hash=sha256:2c34f2ba9660b21fe1034b608308a01be82bbef2a92fb8199f24dc6bad0d5226 \
+ --hash=sha256:3a17ef2322e60858d93584e9c52a5be7dd6236b056b7fa1ec57f1bb6ba043e33 \
+ --hash=sha256:533059bc5f1764fac071ef54598db358c167c51a718f68f5bb55e3dee79d2967 \
+ --hash=sha256:6604877d567a29bfe7cc02969ae0f2425260e5335505cf5e7fefc3e5465f5655 \
+ --hash=sha256:6b35c4e8ca957c877ac35c79ffa77724ecc3702a1e4b18b08306c03feae597bb \
+ --hash=sha256:763697ab8a88d47443997a7cdf3aac7340049aed45f7521f6b0ec8a0594821fe \
+ --hash=sha256:7a4c3fcbf86fa52d0204f07cfd23947ef05b4ad743a1a988e163caa34a201e5e \
+ --hash=sha256:7afde548890a92b41c0fed3a6c525f1200a5727205f73dc21181a2726571bb53 \
+ --hash=sha256:7ba8e1fafc7fff3e643f453bffa7d082df1678a73286ce8187d3e825e776eb94 \
+ --hash=sha256:91f8783fd1f2c23fd3b9ee5ad66b785dafa58ba3cdb050c4458021fa4d1eb226 \
+ --hash=sha256:93b78f53431c93953f7850bb1b925a17f0ab5d97527e38a7e865b5b4bc5cfc18 \
+ --hash=sha256:961134ea44c7b8ca63eda902a44b58cd8bd670e21d62e255c81fba0a8e70d9b7 \
+ --hash=sha256:b89b194e9501a92d289c1ffd411380baf5daafb9818109a4f49b0a1b6dce4462 \
+ --hash=sha256:c7b3fb3d4f6b39bcd4f6c4b98f406291f0d681a895490ee29a0f95bab850d53c \
+ --hash=sha256:d1a746fe5fb9cd974a91070174258f0be129c592b93f9ce7df6cc336416c3fbd \
+ --hash=sha256:e8505946df1665db01332d885c2020b4cb9e84a8b1241eb4ba69d59591f65855 \
+ --hash=sha256:f47619d978ab1ae7dfdc4052ea97c636c6263e1f19bd1be0e42c346b98d15ff4
# via
# jax
# jaxlib
+ # keras
+ # tensorflow
msgpack==1.0.4 \
--hash=sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467 \
--hash=sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae \
@@ -362,6 +358,10 @@ msgpack==1.0.4 \
# via
# flax
# orbax-checkpoint
+namex==0.0.7 \
+ --hash=sha256:84ba65bc4d22bd909e3d26bf2ffb4b9529b608cb3f9a4336f776b04204ced69b \
+ --hash=sha256:8a4f062945f405d77cb66b907f16aa2fd83681945e998be840eb6c4154d40108
+ # via keras
nest-asyncio==1.5.7 \
--hash=sha256:5301c82941b550b3123a1ea772ba9a1c80bad3a182be8c1a5ae6ad3be57a9657 \
--hash=sha256:6a80f7b98f24d9083ed24608977c09dd608d83f91cccc24c9d2cba6d10e01c10
@@ -401,6 +401,7 @@ numpy==1.23.5 \
# h5py
# jax
# jaxlib
+ # keras
# ml-dtypes
# opt-einsum
# optax
@@ -412,10 +413,6 @@ numpy==1.23.5 \
# tensorflow-decision-forests
# tensorflow-hub
# tensorstore
-oauthlib==3.2.2 \
- --hash=sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca \
- --hash=sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918
- # via requests-oauthlib
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549
@@ -426,6 +423,47 @@ optax==0.1.4 \
--hash=sha256:12fcf33bd682f9a162a3deb097f864130c3224d76771af2ba09410de80399a9b \
--hash=sha256:fb7a0550d57a6636164a3de25986a8a19be8ff6431fcdf1225b4e05175810f22
# via flax
+optree==0.11.0 \
+ --hash=sha256:00a63f10d4a476e8e9aa2988daba9b2e88cb369c5aacc12545957d7d00bcd1a7 \
+ --hash=sha256:0db6968394096223881053dffdcaf2b8e220fd85db904f14aa931e4dc422c046 \
+ --hash=sha256:0df9a3923725aabb112ec7f10c74fa96b6c640da1cd30e7bc62fd4b03ef02875 \
+ --hash=sha256:162ed3ff2eb3f1c358e131e72c025f2b93d69b906e9057a811d014032ec71dc8 \
+ --hash=sha256:228b97e8c991739b10c8548c118747ba32ee765f88236342e492bf9648afc0bc \
+ --hash=sha256:234a4f8f97a1217f13390df7ac416771689749d9a1c8eda31bf8622cd333219e \
+ --hash=sha256:26b1230f9b75b579923a4f837c7c13db8b8d815cf68ce5af31dda5d818a877b2 \
+ --hash=sha256:2b3bb59324d635f2015bb3e237fd772b1fd548eee6cc80e008fbe0f092e9228d \
+ --hash=sha256:2bc08fb9691f43afc3a01119dead6b823ce3d7239e42fc3e47d4028eed50a6a2 \
+ --hash=sha256:31d444684ebd8c9f09a3d806fb3277843138ef9952b7a2954908e440e3b22519 \
+ --hash=sha256:39bed744a61e2f795e172d2853779ac59b8dea236982dc160ea22063afc99ca3 \
+ --hash=sha256:3cdc9fac9888d9eff11128ccfc4d4c10309163e372f312f7942ecee8df3d7824 \
+ --hash=sha256:4144126dd3c2ece2d2dd1d5e0b39fb91adf1c46f660c2c5a2df7f80666989d5d \
+ --hash=sha256:418850ceff364f51a6d81f32a1efd06a4e2d8df79a162e892685bc20c0aedd72 \
+ --hash=sha256:5e250144eacdd5813dec0b18d91df0229197e3be402db42fd8e254ec90ea343d \
+ --hash=sha256:5e5df0e8aaca124cc1ffca311786cc909810f3c046de090729cdafbf910082f8 \
+ --hash=sha256:63e020a34b7168b5d0701a265c7c95b07984ff699d4894b20fa601282be88f20 \
+ --hash=sha256:64c2e00fe508f50a42c50838df0d1f5be0dce5b4bef2373db8ad72b860211015 \
+ --hash=sha256:6a406eee5acd3fd4875fa44c3972d29ae6d4329e7296e9219986fe6ff8e92ea0 \
+ --hash=sha256:6cdd625dab2dff5374ff9c6792e8702fced8f0ea713ce959fc8f95499b5ecb2f \
+ --hash=sha256:6e8c3757088cd7fce666f2a5e031b65d7898e210452380d2657c0fc0a7ec9932 \
+ --hash=sha256:738e8bf4158e9c11cd051d89c2e453aeacf80ff8719ebc3251069015646554d0 \
+ --hash=sha256:8e6a46e95c3ea8546055087d6fe52a1dcd56de5182365f1469106cc72cdf3307 \
+ --hash=sha256:979ffc2b96f16595c219fb7a89597dd2fa00ac47a3b411fdcf8ae6821da52290 \
+ --hash=sha256:9bf322ad14f907ad4660ca286e731e750546d54934a94cc5ba7efe8860c60ab4 \
+ --hash=sha256:9d9d644e5448db9f32e2497487aca3bb2d3f92cbb50429a411ccda3f1f0968f3 \
+ --hash=sha256:a5f37bcfe4e363e3bb8d36c5698fb829546956b2fe88951994387162a1859625 \
+ --hash=sha256:a64df43fce2d8eeafd7db6e27447c56b3fa64842df847819684b3b1cc254c016 \
+ --hash=sha256:a91840f9d45e7c01f151ba1815ae32b4c3c21e4290298772ee4b13314f729856 \
+ --hash=sha256:b201a9405e250cf5770955863af2a236e382bdf5e4e086897ff03c41418c39da \
+ --hash=sha256:b26ac807d8993b7e43081b4b7bbb0378b4e5f3e6525daf923c470bc176cc3327 \
+ --hash=sha256:b8126d81ecb2c9e3554420834014ba343251f564c905ee3bef09d205b924b0c0 \
+ --hash=sha256:b9d236bc1491a5e366921b95fecc05aa6ff55989a81f2242cd11121b82c24503 \
+ --hash=sha256:bc17f9d085cd75a2de4f299a9c5e3c3520138eac7596061e581230b03862b44d \
+ --hash=sha256:d666099a78f7bf31bf3a520d6871ddcae65484bcff095fc4271a391553b09c75 \
+ --hash=sha256:e2d47bd28eff690eb2f7432e490265a291b04d6d346cf7b586491b2e2337bf97 \
+ --hash=sha256:ee208f0bec6436085a9fa3ae98af54bfcb8822086894fc1ade283e80a6f11fd7 \
+ --hash=sha256:f53951bfb640417558568284a8949d67bcdbf21fa0113107e20bd9403aa20b2b \
+ --hash=sha256:fa9ed745d4cbac5e15df70339b30867ba033542b87f7b734f4cacae5ec73ba00
+ # via keras
orbax-checkpoint==0.2.3 \
--hash=sha256:155e0a2dceef2901122e66585171e1dff4f4a4d9d2abe43a2b514279b9a3dabd \
--hash=sha256:a001bf48f1cebc635b07263fa546473ea48be3e278c50d5ade880b9aafb96f8a
@@ -483,16 +521,6 @@ protobuf==4.22.3 \
# tensorboard
# tensorflow
# tensorflow-hub
-pyasn1==0.4.8 \
- --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \
- --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba
- # via
- # pyasn1-modules
- # rsa
-pyasn1-modules==0.2.8 \
- --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \
- --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74
- # via google-auth
pygments==2.13.0 \
--hash=sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1 \
--hash=sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42
@@ -552,21 +580,13 @@ pyyaml==6.0 \
requests==2.28.1 \
--hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \
--hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349
- # via
- # requests-oauthlib
- # tensorboard
-requests-oauthlib==1.3.1 \
- --hash=sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5 \
- --hash=sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a
- # via google-auth-oauthlib
+ # via tensorflow
rich==11.2.0 \
--hash=sha256:1a6266a5738115017bb64a66c59c717e7aa047b3ae49a011ede4abdeffc6536e \
--hash=sha256:d5f49ad91fb343efcae45a2b2df04a9755e863e50413623ab8c9e74f05aee52b
- # via flax
-rsa==4.9 \
- --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \
- --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21
- # via google-auth
+ # via
+ # flax
+ # keras
scipy==1.9.3 \
--hash=sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31 \
--hash=sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108 \
@@ -604,62 +624,57 @@ six==1.16.0 \
# via
# -r tfjs-converter/python/requirements.txt
# astunparse
- # google-auth
# google-pasta
# python-dateutil
+ # tensorboard
# tensorflow
# tensorflow-decision-forests
-tensorboard==2.13.0 \
- --hash=sha256:ab69961ebddbddc83f5fa2ff9233572bdad5b883778c35e4fe94bf1798bd8481
+tensorboard==2.16.2 \
+ --hash=sha256:9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45
# via tensorflow
tensorboard-data-server==0.7.0 \
--hash=sha256:64aa1be7c23e80b1a42c13b686eb0875bb70f5e755f4d2b8de5c1d880cf2267f \
--hash=sha256:753d4214799b31da7b6d93837959abebbc6afa86e69eacf1e9a317a48daa31eb \
--hash=sha256:eb7fa518737944dbf4f0cf83c2e40a7ac346bf91be2e6a0215de98be74e85454
# via tensorboard
-tensorflow==2.13.0 \
- --hash=sha256:00060c5516a61e30c51936084ebc37091d116efe9ae74b2818cbd8b2006218e7 \
- --hash=sha256:06559eeaa69e6561cccbe2d02b015bcec663e875c8bbc4643f55692476e52147 \
- --hash=sha256:076d953a1508dc58bf95f30f58bcc9ee364b1353c61e143cb20c2dada91afb05 \
- --hash=sha256:11ad6a7ff49b4a690fbf37a5adaf28ba2686350a859c5f13c58dc8d2cc670375 \
- --hash=sha256:19ee67901702b26787ad685cca65730c163c101c0c2f238a2584d714e0fa8c25 \
- --hash=sha256:2822ac48c38d69b7fb104e606dacbd763c4bf5d3b20791f25be16a5076d01912 \
- --hash=sha256:5e0fdadec59de3d11c5b5129ddc38e739bde7aa13095b82e19d4380e14d04999 \
- --hash=sha256:6fff426661d286a4c634da44275d2ea2b951f392f3e65c8603681e7cb040586a \
- --hash=sha256:72d68b8c2f382e2d01b956c8ba516c0a7d5dad98111dd351bf82bfa646aa1c72 \
- --hash=sha256:7a08c0e2938ed5b642a8787678123827477b81d316055d5073fff82fa183eb82 \
- --hash=sha256:89125443e998548059c4e4a129dfab2b1ff7f2fd4c8eaed97842c3cd9b663101 \
- --hash=sha256:948003b5a23b72b3d89746d729e62ec5f01e47460f05521b2211d95069f569ba \
- --hash=sha256:9c04bc3023b6c4cfb9ee9759c3f03f21993891b4c345df52eb5519204fbf28c0 \
- --hash=sha256:b2978b39e8b3919059b5fd9e28508d50a77965d06ed0b537ed71c97de22dabdf \
- --hash=sha256:cbb83561bb7d55859eaefc70c674e58713d4e10c10927423ed836a5289bbfa86 \
- --hash=sha256:de77306c0c22c9d8754f54700752ac3a1efee895c5357308e6594436404bfbc0 \
- --hash=sha256:e0cf94d36ceaba8f158c6e15404a81fd5b3aa4cb04147c674cf55bd1aec78154 \
- --hash=sha256:e8f0b69ee2f800399fc6bc7ec55fecfa33662d136e425485959d90638f32a32a \
- --hash=sha256:fa7abe265cc3ebccc9b405a280bf674824c6d85df5e6ccfa985987b3c9d265b4 \
- --hash=sha256:fb2ff1129c93e853c19897d6a22ed0ec56387f5c6290ec03dec1c6f7b80bc396
+tensorflow==2.16.1 \
+ --hash=sha256:03b946e73bf48d857928329b8b321b00b42fe1b4f774c6580666683b0629689f \
+ --hash=sha256:093573a8eb93ef9511e7015b8de9659ed27156f2f05e6d1211f8f4cb76407ee1 \
+ --hash=sha256:09cac3c6a8fbf85a9b95491b58086154dd00a09956ed31823bb45c6605f0e881 \
+ --hash=sha256:1c5611e7357b7a4bc6dccc60750c91e27cdff82622fc917848f22add5ab8de26 \
+ --hash=sha256:1e96047657c64459a36a0cc211a3d003df96c7be3f95a84f7b705715f5697270 \
+ --hash=sha256:21a3c6d76a39f52754c389326f6bef8aef3c26b5bc89ca365add4a69483e569e \
+ --hash=sha256:42858b5d14159a2b9cc01c7f5a88e063b0601f20430cb358374005a67da38114 \
+ --hash=sha256:4a123fbb5788ba30d1113ce01bb166ddf85056fcb40e287c32a929ebfa4aa061 \
+ --hash=sha256:617df9fa2d697c4bc22fa3ee87eb01d580ab1bd0438fea15c4ec2f2870c40bb0 \
+ --hash=sha256:8231a9d7bba92a51231dcdcc3073920ad7d22fa88c64c7e2ecb7f1feac9d5fcb \
+ --hash=sha256:8e376ab46fb1df18a1f927d77011d36ecf7b717a81cbfe4a941c7bf5236939b3 \
+ --hash=sha256:92152aa77c402684e9066885515af6a45d88455c4453a818052c7369357078d8 \
+ --hash=sha256:930c61100cce3a5cb63d30fe6776504405214e8398a26ca968222ecb8b8f9404 \
+ --hash=sha256:ab79f156dd746c2dae906e3b4c5daac3855742941752e5a2c28f094c56eed466 \
+ --hash=sha256:ae0554471d472b8095f8a5204d878389d0d4bc88f6ef6edcd477b952dff5cfab \
+ --hash=sha256:bbf06d879070dfce2617c7d2bb19696bb1b2bcbb3b4ae009520e7166dd75dfc2 \
+ --hash=sha256:c612cdd436bb55b8dae1ecdd1d253496c95b006870b7165b8480c6606b8622aa \
+ --hash=sha256:cc2065d1d27f9f89fea8a0fe8fdf6c437ae60987cd7f2928e0d00e532e79e44d \
+ --hash=sha256:e9cf3fba7f389ff8b8342c5fbebb2529321e0ce9e03d7bcb3657ee0876686c36 \
+ --hash=sha256:f8a5b83ca4bf1813da158f63479cfdf848c0761e5120258417b3a96074a489f5
# via
# -r tfjs-converter/python/requirements.txt
# tensorflow-decision-forests
-tensorflow-decision-forests==1.5.0 \
- --hash=sha256:04fd913627d08fe54514b179c612e87eebf55f1448bf01951660985dfa14a6e1 \
- --hash=sha256:1209a2832ac65f8f74bd9d0c1d58f3f8b771e7fa5c9d504c547842311647b7d4 \
- --hash=sha256:22e3835acbfbd5356bb2f8e0c973dfc40ef80a7924b793e90c811158448cfe77 \
- --hash=sha256:43ffd4fba1c3376f58a9dcee943df80f0cff6e47224d109ad0389a723c74947c \
- --hash=sha256:4a0df3a3be5751594d49f5a8f99977b553cf1c42f320b952ac2a2f67b85283f5 \
- --hash=sha256:5ec4297eb5e7c4110cf8aae89e9b08b9ad2cb725e3e63c89c78304c0d7235d24 \
- --hash=sha256:804f6bed277b5c5b6d2bd85738a64973d5d3e8e6ac06abf6098545740245cedc \
- --hash=sha256:a43af2a5a8c34e550bf549c6cad96da271979efc5a8ec988f6f76cc90770415a \
- --hash=sha256:d137241dad8e884d0c937aa8769fe0768324804e9ba666a78b7b5f2f536a0bd2 \
- --hash=sha256:d685e92abe44920ee6d89394ec4e075bb1ada7402f673566146e1b476a576e96 \
- --hash=sha256:f5d8c3730578bda55a8f520ae39b0c9b2560d69bd53b57882e5371c1a82ba098 \
- --hash=sha256:fbd403acf736bb9b4afd2985d9056e6d5043fc4b9a31bd05e5fcae2b1d413dc3
+ # tf-keras
+tensorflow-decision-forests==1.9.0 \
+ --hash=sha256:54d9bb6040fb7698860a23f38ec8a5ce4c2d162f7a54ce82b1b13cf353bac31a \
+ --hash=sha256:688d522d4de7f8e868f068df383d6cfe7f898cba60811f325f470c784ce365e2 \
+ --hash=sha256:7868b1ad4054b14d3f45635fb7eab73495a25900ea4cf12fecc140c3c2004909 \
+ --hash=sha256:942d0501ed95ef2964d1fdb4196b34b75794cc19276770c169de8d4638efa350 \
+ --hash=sha256:baafff33647e87565b8e93bff92f3bace89e4efb5cfd2aceff1a05de52ab3d16 \
+ --hash=sha256:bbc76e92c693114037e5380fcc11201d260e7290f30a56daf23306e0103dd9bb \
+ --hash=sha256:bf85a2d292bcce59d31518f102baa6b8c42d40e73dd5b667d4df83564b2b01dd \
+ --hash=sha256:c5fe3b8fca3579f9342995a85f1c66b8c3524d002ff6cab92d90b557a79715ef \
+ --hash=sha256:f24a830e9d0c3283579ce8406009580ab9295371a014001511963be7c19f8b07
# via -r tfjs-converter/python/requirements.txt
-tensorflow-estimator==2.13.0 \
- --hash=sha256:6f868284eaa654ae3aa7cacdbef2175d0909df9fcf11374f5166f8bf475952aa
- # via tensorflow
-tensorflow-hub==0.14.0 \
- --hash=sha256:519c6b56c4d304667fbd8ce66bd637e6a750c901215468db2cc6bfd0739bb0b0
+tensorflow-hub==0.16.1 \
+ --hash=sha256:e10c184b3d08daeafada11ffea2dd46781725b6bef01fad1f74d6634ad05311f
# via -r tfjs-converter/python/requirements.txt
tensorflow-io-gcs-filesystem==0.34.0 \
--hash=sha256:027a07553367187f918a99661f63ae0506b91b77a70bee9c7ccaf3920bf7cfe7 \
@@ -707,6 +722,13 @@ termcolor==2.1.1 \
--hash=sha256:67cee2009adc6449c650f6bcf3bdeed00c8ba53a8cda5362733c53e0a39fb70b \
--hash=sha256:fa852e957f97252205e105dd55bbc23b419a70fec0085708fc0515e399f304fd
# via tensorflow
+tf-keras==2.16.0 \
+ --hash=sha256:b2ad0541fa7d9e92c4b7a1b96593377afb58aaff374299a6ca6be1a42f51d899 \
+ --hash=sha256:db53891f1ac98197c2acced98cdca8c06ba8255655a6cb7eb95ed49676118280
+ # via
+ # -r tfjs-converter/python/requirements.txt
+ # tensorflow-decision-forests
+ # tensorflow-hub
toolz==0.12.0 \
--hash=sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f \
--hash=sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194
@@ -717,6 +739,7 @@ typing-extensions==4.4.0 \
# via
# flax
# optax
+ # optree
# orbax-checkpoint
# tensorflow
urllib3==1.26.13 \
@@ -732,7 +755,6 @@ wheel==0.38.4 \
--hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8
# via
# astunparse
- # tensorboard
# tensorflow-decision-forests
wrapt==1.14.1 \
--hash=sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3 \
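The pins above move TensorFlow to 2.16.1, whose bundled keras is Keras 3, while the new tf-keras 2.16.0 wheel carries the Keras 2 API that the converter code below imports as `tf_keras`. A minimal sketch, assuming the environment is installed exactly as pinned:

```python
# Minimal sketch, assuming the lockfile above is installed as pinned:
# TF 2.16.1 ships Keras 3, and the separate tf-keras wheel keeps Keras 2.
import tensorflow as tf
import tf_keras

print(tf.__version__)        # 2.16.1 per the pins above
print(tf_keras.__version__)  # 2.16.0 per the pins above
```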
diff --git a/tfjs-converter/python/tensorflowjs/BUILD.bazel b/tfjs-converter/python/tensorflowjs/BUILD.bazel
index 2c3f02d7b2e..a84c5b0cbd6 100644
--- a/tfjs-converter/python/tensorflowjs/BUILD.bazel
+++ b/tfjs-converter/python/tensorflowjs/BUILD.bazel
@@ -116,6 +116,12 @@ py_library(
deps = [requirement("debugpy")],
)
+py_library(
+ name = "expect_tf_keras_installed",
+    # tf-keras is used to provide Keras 2 support.
+ deps = [requirement("tf-keras")],
+)
+
py_library(
name = "quantization",
srcs = ["quantization.py"],
diff --git a/tfjs-converter/python/tensorflowjs/converters/BUILD.bazel b/tfjs-converter/python/tensorflowjs/converters/BUILD.bazel
index df2bfac3381..bf86ceace79 100644
--- a/tfjs-converter/python/tensorflowjs/converters/BUILD.bazel
+++ b/tfjs-converter/python/tensorflowjs/converters/BUILD.bazel
@@ -59,6 +59,7 @@ py_test(
"//tfjs-converter/python/tensorflowjs:expect_h5py_installed",
"//tfjs-converter/python/tensorflowjs:expect_numpy_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
"//tfjs-converter/python/tensorflowjs:version",
],
)
@@ -87,6 +88,7 @@ py_test(
":tf_module_mapper",
"//tfjs-converter/python/tensorflowjs:expect_numpy_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
],
)
@@ -105,6 +107,7 @@ py_library(
":graph_rewrite_util",
"//tfjs-converter/python/tensorflowjs:expect_numpy_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
],
)
@@ -154,6 +157,7 @@ py_library(
":graph_rewrite_util",
"//tfjs-converter/python/tensorflowjs:expect_numpy_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
],
)
@@ -180,6 +184,7 @@ py_library(
":graph_rewrite_util",
"//tfjs-converter/python/tensorflowjs:expect_numpy_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
],
)
@@ -229,6 +234,7 @@ py_library(
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_decision_forests_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_hub_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
"//tfjs-converter/python/tensorflowjs:resource_loader",
"//tfjs-converter/python/tensorflowjs:version",
"//tfjs-converter/python/tensorflowjs:write_weights",
@@ -283,6 +289,7 @@ py_binary(
"//tfjs-converter/python/tensorflowjs:expect_PyInquirer_installed",
"//tfjs-converter/python/tensorflowjs:expect_h5py_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
],
)
@@ -314,6 +321,7 @@ py_binary(
":tf_saved_model_conversion_v2",
"//tfjs-converter/python/tensorflowjs:expect_h5py_installed",
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
"//tfjs-converter/python/tensorflowjs:version",
],
)
@@ -325,6 +333,7 @@ py_binary(
srcs_version = "PY3",
deps = [
"//tfjs-converter/python/tensorflowjs:expect_tensorflow_installed",
+ "//tfjs-converter/python/tensorflowjs:expect_tf_keras_installed",
],
)
diff --git a/tfjs-converter/python/tensorflowjs/converters/converter.py b/tfjs-converter/python/tensorflowjs/converters/converter.py
index d0100e28d47..6c6db2ee62a 100644
--- a/tfjs-converter/python/tensorflowjs/converters/converter.py
+++ b/tfjs-converter/python/tensorflowjs/converters/converter.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Artifact conversion to and from Python TensorFlow and tf.keras."""
+"""Artifact conversion to and from Python TensorFlow and tf_keras."""
from __future__ import absolute_import
from __future__ import division
@@ -28,6 +28,7 @@
import h5py
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflowjs import quantization
from tensorflowjs import version
@@ -50,7 +51,7 @@ def dispatch_keras_h5_to_tfjs_layers_model_conversion(
- A weights-only HDF5 (e.g., generated with Keras Model's `save_weights()`
method),
- A topology+weights combined HDF5 (e.g., generated with
- `tf.keras.model.save_model`).
+      `tf_keras.models.save_model`).
Args:
h5_path: path to an HDF5 file containing keras model data as a `str`.
@@ -199,7 +200,7 @@ def dispatch_keras_h5_to_tfjs_graph_model_conversion(
Args:
h5_path: Path to the HDF5-format file that contains the model saved from
- keras or tf.keras.
+ keras or tf_keras.
output_dir: The destination to which the tfjs GraphModel artifacts will be
written.
quantization_dtype_map: A mapping from dtype (`uint8`, `uint16`, `float16`)
@@ -223,7 +224,7 @@ def dispatch_keras_h5_to_tfjs_graph_model_conversion(
'directory: %s' % h5_path)
temp_savedmodel_dir = tempfile.mktemp(suffix='.savedmodel')
- model = tf.keras.models.load_model(h5_path, compile=False)
+ model = tf_keras.models.load_model(h5_path, compile=False)
model.save(temp_savedmodel_dir, include_optimizer=False, save_format='tf')
# NOTE(cais): This cannot use `tf.compat.v1` because
@@ -253,13 +254,13 @@ def dispatch_keras_saved_model_to_tensorflowjs_conversion(
"""Converts keras model saved in the SavedModel format to tfjs format.
Note that the SavedModel format exists in keras, but not in
- keras-team/tf.keras.
+ keras-team/tf_keras.
Args:
keras_saved_model_path: path to a folder in which the
assets/saved_model.json can be found. This is usually a subfolder
that is under the folder passed to
- `tf.keras.models.save_model()` and has a Unix epoch time
+ `tf_keras.models.save_model()` and has a Unix epoch time
as its name (e.g., 1542212752).
output_dir: Output directory to which the TensorFlow.js-format model JSON
file and weights files will be written. If the directory does not exist,
@@ -274,7 +275,7 @@ def dispatch_keras_saved_model_to_tensorflowjs_conversion(
metadata: User defined metadata map.
"""
with tf.Graph().as_default(), tf.compat.v1.Session():
- model = tf.keras.models.load_model(keras_saved_model_path)
+ model = tf_keras.models.load_model(keras_saved_model_path)
# Save model temporarily in HDF5 format.
temp_h5_path = tempfile.mktemp(suffix='.h5')
@@ -363,12 +364,12 @@ def dispatch_tensorflowjs_to_keras_keras_conversion(config_json_path, v3_path):
'but cannot read valid JSON content from %s.' % config_json_path)
model = keras_tfjs_loader.load_keras_keras_model(config_json_path)
- tf.keras.saving.save_model(model, v3_path, save_format="keras")
+ tf_keras.saving.save_model(model, v3_path, save_format="keras")
def dispatch_tensorflowjs_to_keras_saved_model_conversion(
config_json_path, keras_saved_model_path):
- """Converts a TensorFlow.js Layers model format to a tf.keras SavedModel.
+ """Converts a TensorFlow.js Layers model format to a tf_keras SavedModel.
Args:
config_json_path: Path to the JSON file that includes the model's
@@ -397,7 +398,7 @@ def dispatch_tensorflowjs_to_keras_saved_model_conversion(
with tf.Graph().as_default(), tf.compat.v1.Session():
model = keras_tfjs_loader.load_keras_model(config_json_path)
- tf.keras.models.save_model(
+ tf_keras.models.save_model(
model, keras_saved_model_path, save_format='tf')
@@ -751,7 +752,7 @@ def get_arg_parser():
help='Input format. '
'For "keras", the input path can be one of the two following formats:\n'
' - A topology+weights combined HDF5 (e.g., generated with'
- ' `tf.keras.model.save_model()` method).\n'
+      ' `tf_keras.models.save_model()` method).\n'
' - A weights-only HDF5 (e.g., generated with Keras Model\'s '
' `save_weights()` method). \n'
'For "keras_saved_model", the input_path must point to a subfolder '
@@ -885,7 +886,7 @@ def convert(arguments):
if args.show_version:
print('\ntensorflowjs %s\n' % version.version)
print('Dependency versions:')
- print(' keras %s' % tf.keras.__version__)
+ print(' keras %s' % tf_keras.__version__)
print(' tensorflow %s' % tf.__version__)
return
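Taken together, the converter.py changes route every Keras touchpoint through `tf_keras`. A minimal sketch of the resulting flow, using hypothetical paths:

```python
# Minimal sketch of the patched conversion path; 'MyModel.h5' and
# 'tfjs_model' are hypothetical paths, not part of this patch.
import tf_keras
from tensorflowjs.converters import converter

model = tf_keras.Sequential([tf_keras.layers.Dense(1, input_shape=[2])])
model.compile(loss='mean_squared_error', optimizer='sgd')
model.save('MyModel.h5')  # topology+weights HDF5

# Dispatcher patched above to load the HDF5 via tf_keras.
converter.dispatch_keras_h5_to_tfjs_layers_model_conversion(
    'MyModel.h5', 'tfjs_model')
```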
diff --git a/tfjs-converter/python/tensorflowjs/converters/converter_test.py b/tfjs-converter/python/tensorflowjs/converters/converter_test.py
index 397e94d2ced..8598e19fe69 100644
--- a/tfjs-converter/python/tensorflowjs/converters/converter_test.py
+++ b/tfjs-converter/python/tensorflowjs/converters/converter_test.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Unit tests for artifact conversion to and from Python tf.keras."""
+"""Unit tests for artifact conversion to and from Python tf_keras."""
from __future__ import absolute_import
from __future__ import division
@@ -24,10 +24,10 @@
import shutil
import tempfile
import unittest
-import keras
import numpy as np
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflowjs import version
from tensorflowjs.converters import converter
@@ -50,13 +50,13 @@ def tearDown(self):
def testWeightsOnly(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- input_tensor = tf.keras.layers.Input((3,))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3,))
+ dense1 = tf_keras.layers.Dense(
4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',
name='MyDense1')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2, use_bias=False, kernel_initializer='ones', name='MyDense2')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
h5_path = os.path.join(self._tmp_dir, 'MyModel.h5')
model.save_weights(h5_path)
@@ -80,14 +80,14 @@ def testWeightsOnly(self):
def testConvertSavedKerasModelNoSplitByLayer(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- input_tensor = tf.keras.layers.Input((3,))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3,))
+ dense1 = tf_keras.layers.Dense(
4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',
name='MergedDense1')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2, use_bias=False,
kernel_initializer='ones', name='MergedDense2')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
h5_path = os.path.join(self._tmp_dir, 'MyModelMerged.h5')
model.save(h5_path)
@@ -100,7 +100,7 @@ def testConvertSavedKerasModelNoSplitByLayer(self):
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
# Check the loaded weights.
- self.assertEqual(keras.__version__, model_json['keras_version'])
+ self.assertEqual(tf_keras.__version__, model_json['keras_version'])
self.assertEqual('tensorflow', model_json['backend'])
self.assertEqual(1, len(groups))
self.assertEqual(3, len(groups[0]))
@@ -115,14 +115,14 @@ def testConvertSavedKerasModelNoSplitByLayer(self):
def testConvertSavedKerasModelSplitByLayer(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- input_tensor = tf.keras.layers.Input((3,))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3,))
+ dense1 = tf_keras.layers.Dense(
4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',
name='MergedDense1')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2, use_bias=False,
kernel_initializer='ones', name='MergedDense2')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
h5_path = os.path.join(self._tmp_dir, 'MyModelMerged.h5')
model.save(h5_path)
@@ -136,7 +136,7 @@ def testConvertSavedKerasModelSplitByLayer(self):
self.assertIn('layers', model_json['model_config']['config'])
# Check the loaded weights.
- self.assertEqual(keras.__version__, model_json['keras_version'])
+ self.assertEqual(tf_keras.__version__, model_json['keras_version'])
self.assertEqual('tensorflow', model_json['backend'])
self.assertEqual(2, len(groups))
self.assertEqual(2, len(groups[0]))
@@ -152,8 +152,8 @@ def testConvertSavedKerasModelSplitByLayer(self):
def testConvertSavedKerasModeltoTfLayersModelSharded(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
@@ -181,11 +181,11 @@ def testConvertSavedKerasModeltoTfLayersModelSharded(self):
def testConvertWeightsFromSequentialModel(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1'),
- tf.keras.layers.Dense(
+ tf_keras.layers.Dense(
1, use_bias=False, kernel_initializer='ones', name='Dense2')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
sequential_model.save_weights(h5_path)
@@ -210,11 +210,11 @@ def testConvertWeightsFromSequentialModel(self):
def testUserDefinedMetadata(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1'),
- tf.keras.layers.Dense(
+ tf_keras.layers.Dense(
1, use_bias=False, kernel_initializer='ones', name='Dense2')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
sequential_model.save_weights(h5_path)
@@ -231,8 +231,8 @@ def testUserDefinedMetadata(self):
def testConvertModelForNonexistentDirCreatesDir(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
output_dir = os.path.join(self._tmp_dir, 'foo_model')
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
@@ -253,8 +253,8 @@ def testOutpuDirAsAnExistingFileLeadsToValueError(self):
f.write('\n')
with tf.Graph().as_default(), tf.compat.v1.Session():
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
@@ -267,11 +267,11 @@ def testOutpuDirAsAnExistingFileLeadsToValueError(self):
def testTensorflowjsToKerasConversionSucceeds(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1'),
- tf.keras.layers.Dense(
+ tf_keras.layers.Dense(
1, use_bias=False, kernel_initializer='ones', name='Dense2')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
sequential_model.use_legacy_config = True
@@ -287,8 +287,7 @@ def testTensorflowjsToKerasConversionSucceeds(self):
# Load the new H5 and compare the model JSONs.
with tf.Graph().as_default(), tf.compat.v1.Session():
- new_model = tf.keras.models.load_model(new_h5_path)
- new_model.use_legacy_config = True
+ new_model = tf_keras.models.load_model(new_h5_path)
self.assertEqual(old_model_json, new_model.to_json())
def testTensorflowjsToKerasConversionFailsOnDirInputPath(self):
@@ -299,11 +298,11 @@ def testTensorflowjsToKerasConversionFailsOnDirInputPath(self):
def testTensorflowjsToKerasConversionFailsOnExistingDirOutputPath(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1'),
- tf.keras.layers.Dense(
+ tf_keras.layers.Dense(
1, use_bias=False, kernel_initializer='ones', name='Dense2')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
sequential_model.save(h5_path)
@@ -339,8 +338,8 @@ def tearDown(self):
def testConvertKerasModelToTfGraphModel(self):
output_dir = os.path.join(self._tmp_dir, 'foo_model')
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
@@ -370,8 +369,8 @@ def testConvertKerasModelToTfGraphModel(self):
def testConvertKerasModelToTfGraphModelSharded(self):
output_dir = os.path.join(self._tmp_dir, 'foo_model')
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
@@ -404,8 +403,8 @@ def testConvertKerasModelToTfGraphModelSharded(self):
def testUserDefinedMetadata(self):
output_dir = os.path.join(self._tmp_dir, 'foo_model')
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense1')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
@@ -431,29 +430,29 @@ def tearDown(self):
super(ConvertTfKerasSavedModelTest, self).tearDown()
def _createSimpleSequentialModel(self):
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Reshape([2, 3], input_shape=[6]))
- model.add(tf.keras.layers.LSTM(10))
- model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Reshape([2, 3], input_shape=[6]))
+ model.add(tf_keras.layers.LSTM(10))
+ model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict(tf.ones((1, 6)), steps=1)
- tf.keras.backend.set_learning_phase(0)
+ tf_keras.backend.set_learning_phase(0)
return model
def _createNestedSequentialModel(self):
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Dense(6, input_shape=[10], activation='relu'))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Dense(6, input_shape=[10], activation='relu'))
model.add(self._createSimpleSequentialModel())
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict(tf.ones((1, 10)), steps=1)
return model
def _createFunctionalModelWithWeights(self):
- input1 = tf.keras.Input(shape=[8])
- input2 = tf.keras.Input(shape=[10])
- y = tf.keras.layers.Concatenate()([input1, input2])
- y = tf.keras.layers.Dense(4, activation='softmax')(y)
- model = tf.keras.Model([input1, input2], y)
+ input1 = tf_keras.Input(shape=[8])
+ input2 = tf_keras.Input(shape=[10])
+ y = tf_keras.layers.Concatenate()([input1, input2])
+ y = tf_keras.layers.Dense(4, activation='softmax')(y)
+ model = tf_keras.Model([input1, input2], y)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict([tf.ones((1, 8)), tf.ones((1, 10))], steps=1)
return model
@@ -463,7 +462,7 @@ def testConvertTfKerasSequentialSavedAsSavedModel(self):
model = self._createSimpleSequentialModel()
old_model_json = json.loads(model.to_json())
old_weights = model.get_weights()
- tf.keras.models.save_model(model, self._tmp_dir, save_format='tf')
+ tf_keras.models.save_model(model, self._tmp_dir, save_format='tf')
# Convert the keras SavedModel to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -494,7 +493,7 @@ def testConvertTfKerasSequentialCompiledAndSavedAsSavedModel(self):
old_model_json = json.loads(model.to_json())
old_weights = model.get_weights()
- tf.keras.models.save_model(model, self._tmp_dir, save_format='tf')
+ tf_keras.models.save_model(model, self._tmp_dir, save_format='tf')
# Convert the keras SavedModel to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -522,7 +521,7 @@ def testConvertTfKerasSequentialCompiledAndSavedAsSavedModel(self):
def testWrongConverterRaisesCorrectErrorMessage(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model = self._createSimpleSequentialModel()
- tf.keras.models.save_model(model, self._tmp_dir, save_format='tf')
+ tf_keras.models.save_model(model, self._tmp_dir, save_format='tf')
# Convert the keras SavedModel to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -539,7 +538,7 @@ def testConvertTfKerasNestedSequentialSavedAsSavedModel(self):
model = self._createNestedSequentialModel()
old_model_json = json.loads(model.to_json())
old_weights = model.get_weights()
- tf.keras.models.save_model(model, self._tmp_dir, save_format='tf')
+ tf_keras.models.save_model(model, self._tmp_dir, save_format='tf')
# Convert the keras SavedModel to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -569,7 +568,7 @@ def testConvertTfKerasFunctionalModelWithWeightsSavedAsSavedModel(self):
model = self._createFunctionalModelWithWeights()
old_model_json = json.loads(model.to_json())
old_weights = model.get_weights()
- tf.keras.models.save_model(model, self._tmp_dir, save_format='tf')
+ tf_keras.models.save_model(model, self._tmp_dir, save_format='tf')
# Convert the keras SavedModel to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -597,7 +596,7 @@ def testConvertTfKerasFunctionalModelWithWeightsSavedAsSavedModel(self):
def testConvertTfKerasSequentialSavedAsSavedModelWithQuantization(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model = self._createSimpleSequentialModel()
- tf.keras.models.save_model(model, self._tmp_dir, save_format='tf')
+ tf_keras.models.save_model(model, self._tmp_dir, save_format='tf')
# Convert the keras SavedModel to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -788,7 +787,7 @@ def testConvertTfjsLayersModelToKerasSavedModel(self):
converter.dispatch_keras_h5_to_tfjs_layers_model_conversion(
h5_path, tfjs_output_dir)
- # Convert the tfjs LayersModel to tf.keras SavedModel.
+ # Convert the tfjs LayersModel to tf_keras SavedModel.
keras_saved_model_dir = os.path.join(self._tmp_dir, 'saved_model')
converter.dispatch_tensorflowjs_to_keras_saved_model_conversion(
os.path.join(tfjs_output_dir, 'model.json'), keras_saved_model_dir)
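The last test above drives the reverse direction through the same public dispatcher; a hedged sketch with hypothetical paths:

```python
# Hedged sketch of the reverse conversion exercised above; both paths
# are hypothetical. Emits a tf_keras SavedModel directory.
from tensorflowjs.converters import converter

converter.dispatch_tensorflowjs_to_keras_saved_model_conversion(
    'tfjs_model/model.json', 'keras_saved_model_out')
```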
diff --git a/tfjs-converter/python/tensorflowjs/converters/fuse_depthwise_conv2d_test.py b/tfjs-converter/python/tensorflowjs/converters/fuse_depthwise_conv2d_test.py
index b86850df736..31151accea3 100644
--- a/tfjs-converter/python/tensorflowjs/converters/fuse_depthwise_conv2d_test.py
+++ b/tfjs-converter/python/tensorflowjs/converters/fuse_depthwise_conv2d_test.py
@@ -19,6 +19,7 @@
import tempfile
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflowjs.converters import fuse_depthwise_conv2d
from tensorflowjs.converters import graph_rewrite_util
@@ -37,11 +38,11 @@ def tearDown(self):
def testFuseDepthwiseConv2dNativeWithBias(self):
layers = [
- tf.keras.layers.DepthwiseConv2D(
+ tf_keras.layers.DepthwiseConv2D(
1, bias_initializer=tf.initializers.constant(0.25))
]
- model = tf.keras.Sequential(layers)
- tf.keras.backend.set_learning_phase(0)
+ model = tf_keras.Sequential(layers)
+ tf_keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 1, 1, 2])
@tf.function
@@ -68,12 +69,12 @@ def execute_model(tensor):
def testFuseDepthwiseConv2dNativeWithBiasAndActivation(self):
layers = [
- tf.keras.layers.DepthwiseConv2D(
+ tf_keras.layers.DepthwiseConv2D(
1, bias_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.ReLU()
+ tf_keras.layers.ReLU()
]
- model = tf.keras.Sequential(layers)
- tf.keras.backend.set_learning_phase(0)
+ model = tf_keras.Sequential(layers)
+ tf_keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 1, 1, 2])
@tf.function
@@ -101,11 +102,11 @@ def execute_model(tensor):
def testFuseDepthwiseConv2dNativeWithActivation(self):
layers = [
- tf.keras.layers.DepthwiseConv2D(1, use_bias=False),
- tf.keras.layers.ReLU()
+ tf_keras.layers.DepthwiseConv2D(1, use_bias=False),
+ tf_keras.layers.ReLU()
]
- model = tf.keras.Sequential(layers)
- tf.keras.backend.set_learning_phase(0)
+ model = tf_keras.Sequential(layers)
+ tf_keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 1, 1, 2])
@tf.function
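The fuse tests in this file and the next all follow the same shape: build a tiny `tf_keras` model, freeze the learning phase, and trace it through a `tf.function` so the rewriter sees a stable graph. A condensed sketch of that shared pattern:

```python
# Condensed sketch of the shared test pattern; mirrors the code above.
import tensorflow.compat.v2 as tf
import tf_keras

layers = [tf_keras.layers.DepthwiseConv2D(1, use_bias=False),
          tf_keras.layers.ReLU()]
model = tf_keras.Sequential(layers)
tf_keras.backend.set_learning_phase(0)

@tf.function
def execute_model(tensor):
  return model(tensor)

# Trace once at a fixed shape to obtain a concrete graph for rewriting.
concrete = execute_model.get_concrete_function(
    tf.TensorSpec([1, 1, 1, 2], tf.float32))
```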
diff --git a/tfjs-converter/python/tensorflowjs/converters/fuse_prelu_test.py b/tfjs-converter/python/tensorflowjs/converters/fuse_prelu_test.py
index ffceb14d1bc..5d7157af6d2 100644
--- a/tfjs-converter/python/tensorflowjs/converters/fuse_prelu_test.py
+++ b/tfjs-converter/python/tensorflowjs/converters/fuse_prelu_test.py
@@ -19,6 +19,7 @@
import tempfile
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import def_function
@@ -43,13 +44,13 @@ def tearDown(self):
def testFusePrelu(self):
layers = [
- tf.keras.layers.PReLU(
+ tf_keras.layers.PReLU(
alpha_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.PReLU(
+ tf_keras.layers.PReLU(
alpha_initializer=tf.initializers.constant(0.25))
]
- model = tf.keras.Sequential(layers)
- tf.keras.backend.set_learning_phase(0)
+ model = tf_keras.Sequential(layers)
+ tf_keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0])
@tf.function
@@ -92,13 +93,13 @@ def execute_model(tensor):
def testFusePreluWithConv2d(self):
layers = [
- tf.keras.layers.Conv2D(
+ tf_keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=True,
bias_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.PReLU()
+ tf_keras.layers.PReLU()
]
- model = tf.keras.Sequential(layers)
- tf.keras.backend.set_learning_phase(0)
+ model = tf_keras.Sequential(layers)
+ tf_keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 2, 1, 1])
@tf.function
@@ -142,14 +143,14 @@ def execute_model(tensor):
def testFusePreluWithMatMul(self):
layers = [
- tf.keras.layers.Dense(
+ tf_keras.layers.Dense(
2, use_bias=True,
kernel_initializer=tf.initializers.constant(0.25),
bias_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.PReLU()
+ tf_keras.layers.PReLU()
]
- model = tf.keras.Sequential(layers)
- tf.keras.backend.set_learning_phase(0)
+ model = tf_keras.Sequential(layers)
+ tf_keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 2])
@tf.function
@@ -191,12 +192,12 @@ def execute_model(tensor):
def testFusePreluWithDepthwiseConv2d(self):
layers = [
- tf.keras.layers.DepthwiseConv2D(
+ tf_keras.layers.DepthwiseConv2D(
1, bias_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.PReLU()
+ tf_keras.layers.PReLU()
]
- model = tf.keras.Sequential(layers)
- tf.keras.backend.set_learning_phase(0)
+ model = tf_keras.Sequential(layers)
+ tf_keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 2, 1, 1])
@tf.function
diff --git a/tfjs-converter/python/tensorflowjs/converters/generate_test_model.py b/tfjs-converter/python/tensorflowjs/converters/generate_test_model.py
index ab8e990fc91..4b7f02c1752 100644
--- a/tfjs-converter/python/tensorflowjs/converters/generate_test_model.py
+++ b/tfjs-converter/python/tensorflowjs/converters/generate_test_model.py
@@ -23,6 +23,7 @@
import sys
import tensorflow.compat.v2 as tf
+import tf_keras
def parse_args():
parser = argparse.ArgumentParser(
@@ -43,9 +44,9 @@ def parse_args():
def main(_):
if args.model_type == 'tf_keras_h5':
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Dense(5, activation='relu', input_shape=(8,)))
- model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Dense(5, activation='relu', input_shape=(8,)))
+ model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
model.save(os.path.join(args.output_path))
elif args.model_type == 'tf_saved_model':
class TimesThreePlusOne(tf.Module):
diff --git a/tfjs-converter/python/tensorflowjs/converters/keras_h5_conversion_test.py b/tfjs-converter/python/tensorflowjs/converters/keras_h5_conversion_test.py
index f3c95e3aad8..e50dc4825ed 100644
--- a/tfjs-converter/python/tensorflowjs/converters/keras_h5_conversion_test.py
+++ b/tfjs-converter/python/tensorflowjs/converters/keras_h5_conversion_test.py
@@ -25,11 +25,11 @@
import tempfile
import unittest
import six
-import keras
import h5py
import numpy as np
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflowjs import version
from tensorflowjs.converters import keras_h5_conversion as conversion
@@ -47,13 +47,13 @@ def tearDown(self):
super(ConvertH5WeightsTest, self).tearDown()
def testConvertWeightsFromSimpleModelNoSplitByLayer(self):
- input_tensor = tf.keras.layers.Input((3,))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3,))
+ dense1 = tf_keras.layers.Dense(
4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',
name='MyDense10')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2, use_bias=False, kernel_initializer='ones', name='MyDense20')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
h5_path = os.path.join(self._tmp_dir, 'MyModel.h5')
model.save_weights(h5_path)
@@ -82,13 +82,13 @@ def testConvertWeightsFromSimpleModelNoSplitByLayer(self):
self.assertTrue(np.allclose(np.ones([4, 2]), kernel2['data']))
def testConvertWeightsFromSimpleModelSplitByLayer(self):
- input_tensor = tf.keras.layers.Input((3,))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3,))
+ dense1 = tf_keras.layers.Dense(
4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',
name='MyDense30')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2, use_bias=False, kernel_initializer='ones', name='MyDense40')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
h5_path = os.path.join(self._tmp_dir, 'MyModel.h5')
model.save_weights(h5_path)
@@ -120,13 +120,13 @@ def testConvertWeightsFromSimpleModelSplitByLayer(self):
self.assertTrue(np.allclose(np.ones([4, 2]), kernel2['data']))
def testConvertModelWithNestedLayerNames(self):
- model = tf.keras.Sequential()
+ model = tf_keras.Sequential()
# Add a layer with a nested layer name, i.e., a layer name with slash(es)
# in it.
- model.add(tf.keras.layers.Dense(2, input_shape=[12], name='dense'))
- model.add(tf.keras.layers.Dense(8, name='foo/dense'))
- model.add(tf.keras.layers.Dense(4, name='foo/bar/dense'))
+ model.add(tf_keras.layers.Dense(2, input_shape=[12], name='dense'))
+ model.add(tf_keras.layers.Dense(8, name='foo/dense'))
+ model.add(tf_keras.layers.Dense(4, name='foo/bar/dense'))
tfjs_path = os.path.join(self._tmp_dir, 'nested_layer_names_model')
conversion.save_keras_model(model, tfjs_path)
@@ -137,7 +137,7 @@ def testConvertModelWithNestedLayerNames(self):
# Check meta-data in the artifact JSON.
self.assertEqual(model_json['format'], 'layers-model')
self.assertEqual(model_json['generatedBy'],
- 'keras v%s' % keras.__version__)
+ 'keras v%s' % tf_keras.__version__)
self.assertEqual(
model_json['convertedBy'],
'TensorFlow.js Converter v%s' % version.version)
@@ -161,14 +161,14 @@ def testConvertModelWithNestedLayerNames(self):
self.assertEqual([4], weight_shapes['foo/bar/dense/bias'])
def testConvertMergedModelFromSimpleModelNoSplitByLayer(self):
- input_tensor = tf.keras.layers.Input((3,))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3,))
+ dense1 = tf_keras.layers.Dense(
4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',
name='MergedDense10')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2, use_bias=False,
kernel_initializer='ones', name='MergedDense20')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
h5_path = os.path.join(self._tmp_dir, 'MyModelMerged.h5')
model.save(h5_path)
# Ensure matching legacy serialization format
@@ -191,7 +191,7 @@ def testConvertMergedModelFromSimpleModelNoSplitByLayer(self):
# Check the loaded weights.
# By default, all weights of the model ought to be put in the same group.
self.assertEqual(1, len(groups))
- self.assertEqual(keras.__version__, out['keras_version'])
+ self.assertEqual(tf_keras.__version__, out['keras_version'])
self.assertEqual('tensorflow', out['backend'])
weight_group = groups[0]
self.assertEqual(3, len(weight_group))
@@ -212,14 +212,14 @@ def testConvertMergedModelFromSimpleModelNoSplitByLayer(self):
self.assertTrue(np.allclose(np.ones([4, 2]), kernel2['data']))
def testConvertMergedModelFromSimpleModelSplitByLayer(self):
- input_tensor = tf.keras.layers.Input((3,))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3,))
+ dense1 = tf_keras.layers.Dense(
4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',
name='MergedDense30')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2, use_bias=False,
kernel_initializer='ones', name='MergedDense40')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
h5_path = os.path.join(self._tmp_dir, 'MyModelMerged.h5')
model.save(h5_path)
# Ensure matching legacy serialization format
@@ -243,7 +243,7 @@ def testConvertMergedModelFromSimpleModelSplitByLayer(self):
# Due to `split_by_layer=True`, there ought to be two weight groups,
# because the model has two layers.
self.assertEqual(2, len(groups))
- self.assertEqual(keras.__version__, out['keras_version'])
+ self.assertEqual(tf_keras.__version__, out['keras_version'])
self.assertEqual('tensorflow', out['backend'])
self.assertEqual(2, len(groups[0]))
kernel1 = groups[0][0]
@@ -264,11 +264,11 @@ def testConvertMergedModelFromSimpleModelSplitByLayer(self):
self.assertTrue(np.allclose(np.ones([4, 2]), kernel2['data']))
def testConvertWeightsFromSequentialModelNoSplitByLayer(self):
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense10'),
- tf.keras.layers.Dense(
+ tf_keras.layers.Dense(
1, use_bias=False, kernel_initializer='ones', name='Dense20')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
sequential_model.save_weights(h5_path)
@@ -298,11 +298,11 @@ def testConvertWeightsFromSequentialModelNoSplitByLayer(self):
self.assertTrue(np.allclose(np.ones([3, 1]).tolist(), kernel2['data']))
def testConvertWeightsFromSequentialModelSplitByLayer(self):
- sequential_model = tf.keras.models.Sequential([
- tf.keras.layers.Dense(
+ sequential_model = tf_keras.models.Sequential([
+ tf_keras.layers.Dense(
3, input_shape=(2,), use_bias=True, kernel_initializer='ones',
name='Dense30'),
- tf.keras.layers.Dense(
+ tf_keras.layers.Dense(
1, use_bias=False, kernel_initializer='ones', name='Dense40')])
h5_path = os.path.join(self._tmp_dir, 'SequentialModel.h5')
sequential_model.save_weights(h5_path)
@@ -335,10 +335,10 @@ def testConvertWeightsFromSequentialModelSplitByLayer(self):
self.assertTrue(np.allclose(np.ones([3, 1]).tolist(), kernel2['data']))
def testSaveModelSucceedsForNonSequentialModel(self):
- t_input = tf.keras.Input([2])
- dense_layer = tf.keras.layers.Dense(3)
+ t_input = tf_keras.Input([2])
+ dense_layer = tf_keras.layers.Dense(3)
t_output = dense_layer(t_input)
- model = tf.keras.Model(t_input, t_output)
+ model = tf_keras.Model(t_input, t_output)
conversion.save_keras_model(model, self._tmp_dir)
# Verify the content of the artifacts output directory.
@@ -358,12 +358,12 @@ def testSaveModelSucceedsForNonSequentialModel(self):
self.assertIn('paths', weights_manifest[0])
def testSaveModelSucceedsForTfKerasNonSequentialModel(self):
- t_input = tf.keras.Input([2])
- dense_layer = tf.keras.layers.Dense(3)
+ t_input = tf_keras.Input([2])
+ dense_layer = tf_keras.layers.Dense(3)
t_output = dense_layer(t_input)
- model = tf.keras.Model(t_input, t_output)
+ model = tf_keras.Model(t_input, t_output)
- # `tf.keras.Model`s must be compiled before they can be saved.
+ # `tf_keras.Model`s must be compiled before they can be saved.
model.compile(loss='mean_squared_error', optimizer='sgd')
conversion.save_keras_model(model, self._tmp_dir)
@@ -385,12 +385,12 @@ def testSaveModelSucceedsForTfKerasNonSequentialModel(self):
self.assertIn('paths', weights_manifest[0])
def testSaveModelSucceedsForNestedKerasModel(self):
- inner_model = tf.keras.Sequential([
- tf.keras.layers.Dense(4, input_shape=[3], activation='relu'),
- tf.keras.layers.Dense(3, activation='tanh')])
- outer_model = tf.keras.Sequential()
+ inner_model = tf_keras.Sequential([
+ tf_keras.layers.Dense(4, input_shape=[3], activation='relu'),
+ tf_keras.layers.Dense(3, activation='tanh')])
+ outer_model = tf_keras.Sequential()
outer_model.add(inner_model)
- outer_model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ outer_model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
conversion.save_keras_model(outer_model, self._tmp_dir)
@@ -414,9 +414,9 @@ def testSaveModelSucceedsForNestedKerasModel(self):
self.assertEqual(6, len(weight_entries))
def testSaveModelSucceedsForTfKerasSequentialModel(self):
- model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[2])])
+ model = tf_keras.Sequential([tf_keras.layers.Dense(1, input_shape=[2])])
- # `tf.keras.Model`s must be compiled before they can be saved.
+ # `tf_keras.Model`s must be compiled before they can be saved.
model.compile(loss='mean_squared_error', optimizer='sgd')
conversion.save_keras_model(model, self._tmp_dir)
@@ -440,8 +440,8 @@ def testSaveModelSucceedsForTfKerasSequentialModel(self):
def testSavedModelSucceedsForExistingDirAndSequential(self):
artifacts_dir = os.path.join(self._tmp_dir, 'artifacts')
os.makedirs(artifacts_dir)
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Dense(3, input_shape=[2]))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Dense(3, input_shape=[2]))
conversion.save_keras_model(model, artifacts_dir)
# Verify the content of the artifacts output directory.
@@ -461,9 +461,9 @@ def testSavedModelSucceedsForExistingDirAndSequential(self):
self.assertIn('paths', weights_manifest[0])
def testSavedModelSucceedsForCustomShardSize(self):
- model = tf.keras.Sequential([
- tf.keras.layers.Dense(1, input_shape=[2], activation='relu'),
- tf.keras.layers.Dense(3, activation='tanh')
+ model = tf_keras.Sequential([
+ tf_keras.layers.Dense(1, input_shape=[2], activation='relu'),
+ tf_keras.layers.Dense(3, activation='tanh')
])
weights = model.get_weights()
@@ -488,10 +488,10 @@ def testSavedModelRaisesErrorIfArtifactsDirExistsAsAFile(self):
artifacts_dir = os.path.join(self._tmp_dir, 'artifacts')
with open(artifacts_dir, 'wt') as f:
f.write('foo\n')
- t_input = tf.keras.Input([2])
- dense_layer = tf.keras.layers.Dense(3)
+ t_input = tf_keras.Input([2])
+ dense_layer = tf_keras.layers.Dense(3)
t_output = dense_layer(t_input)
- model = tf.keras.Model(t_input, t_output)
+ model = tf_keras.Model(t_input, t_output)
with self.assertRaisesRegexp( # pylint: disable=deprecated-method
ValueError, r'already exists as a file'):
conversion.save_keras_model(model, artifacts_dir)
@@ -514,8 +514,8 @@ def testTranslateBatchNormalizationV1ClassName(self):
self.assertEqual(json_object['config']['layers'][2]['class_name'], 'Dense')
# Assert that converted JSON can be reconstituted as a model object.
- model = tf.keras.models.model_from_json(json.dumps(json_object))
- self.assertIsInstance(model, tf.keras.Sequential)
+ model = tf_keras.models.model_from_json(json.dumps(json_object))
+ self.assertIsInstance(model, tf_keras.Sequential)
self.assertEqual(model.input_shape, (None, 3))
self.assertEqual(model.output_shape, (None, 1))
self.assertEqual(model.layers[0].units, 10)
@@ -539,8 +539,8 @@ def testTranslateUnifiedGRUAndLSTMClassName(self):
self.assertEqual(json_object['config']['layers'][1]['class_name'], 'LSTM')
# Assert that converted JSON can be reconstituted as a model object.
- model = tf.keras.models.model_from_json(json.dumps(json_object))
- self.assertIsInstance(model, tf.keras.Sequential)
+ model = tf_keras.models.model_from_json(json.dumps(json_object))
+ self.assertIsInstance(model, tf_keras.Sequential)
self.assertEqual(model.input_shape, (None, 4, 3))
self.assertEqual(model.output_shape, (None, 2))
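The helper these tests exercise, `conversion.save_keras_model`, writes tfjs Layers artifacts straight from a live model. A hedged sketch with a hypothetical output directory:

```python
# Hedged sketch; 'artifacts_dir' is a hypothetical output directory.
import tf_keras
from tensorflowjs.converters import keras_h5_conversion as conversion

model = tf_keras.Sequential([tf_keras.layers.Dense(3, input_shape=[2])])
model.compile(loss='mean_squared_error', optimizer='sgd')
conversion.save_keras_model(model, 'artifacts_dir')  # model.json + shards
```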
diff --git a/tfjs-converter/python/tensorflowjs/converters/keras_tfjs_loader.py b/tfjs-converter/python/tensorflowjs/converters/keras_tfjs_loader.py
index c6b1a5f6b29..0b89646b23d 100644
--- a/tfjs-converter/python/tensorflowjs/converters/keras_tfjs_loader.py
+++ b/tfjs-converter/python/tensorflowjs/converters/keras_tfjs_loader.py
@@ -25,6 +25,7 @@
import datetime
import six
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflowjs.converters import tf_module_mapper
from tensorflowjs.converters import keras_h5_conversion
from tensorflowjs.converters.tf_module_mapper import TFCLASS_MODULE_MAP
@@ -62,7 +63,7 @@ def _deserialize_keras_model(model_topology_json,
model_topology_json = model_topology_json['model_config']
unique_name_scope = uuid.uuid4().hex if use_unique_name_scope else None
with tf.compat.v1.name_scope(unique_name_scope):
- model = tf.keras.models.model_from_json(json.dumps(model_topology_json))
+ model = tf_keras.models.model_from_json(json.dumps(model_topology_json))
if weight_entries:
weights_dict = dict()
@@ -126,7 +127,7 @@ def _deserialize_keras_keras_model(model_topology_json,
_generate_v3_keys(model_topology_json['model_config'])
model_topology_json = model_topology_json['model_config']
- model = tf.keras.models.model_from_json(json.dumps(model_topology_json))
+ model = tf_keras.models.model_from_json(json.dumps(model_topology_json))
if weight_entries:
weights_dict = dict()
@@ -257,7 +258,7 @@ def load_keras_model(config_json_path,
same TensorFlow Graph or Session context. Default: `False`.
Returns:
- The loaded instance of `tf.keras.Model`.
+ The loaded instance of `tf_keras.Model`.
Raises:
TypeError, if the format of the JSON content of `config_json_path` has an
@@ -324,7 +325,7 @@ def load_keras_keras_model(config_json_path,
same TensorFlow Graph or Session context. Default: `False`.
Returns:
- The loaded instance of `tf.keras.Model`.
+ The loaded instance of `tf_keras.Model`.
Raises:
TypeError, if the format of the JSON content of `config_json_path` has an
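Per the docstring above, the loader returns a live `tf_keras.Model`; a hedged sketch with a hypothetical path:

```python
# Hedged sketch; 'tfjs_model/model.json' is a hypothetical path.
from tensorflowjs.converters import keras_tfjs_loader

model = keras_tfjs_loader.load_keras_model('tfjs_model/model.json')
print(type(model))  # an instance of tf_keras.Model, per the docstring above
```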
diff --git a/tfjs-converter/python/tensorflowjs/converters/keras_tfjs_loader_test.py b/tfjs-converter/python/tensorflowjs/converters/keras_tfjs_loader_test.py
index a9a166d716c..39ecdf7d3f2 100755
--- a/tfjs-converter/python/tensorflowjs/converters/keras_tfjs_loader_test.py
+++ b/tfjs-converter/python/tensorflowjs/converters/keras_tfjs_loader_test.py
@@ -27,6 +27,7 @@
import numpy as np
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflowjs.converters import keras_h5_conversion
from tensorflowjs.converters import keras_tfjs_loader
@@ -45,23 +46,21 @@ def tearDown(self):
super(LoadKerasModelTest, self).tearDown()
def _saveKerasModelForTest(self, path):
- model = tf.keras.Sequential()
- model.use_legacy_config = True
- model.add(tf.keras.layers.Dense(
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Dense(
2, input_shape=[12], bias_initializer='random_normal', name='dense'))
- model.add(tf.keras.layers.Dense(
+ model.add(tf_keras.layers.Dense(
8, bias_initializer='random_normal', name='foo/dense'))
- model.add(tf.keras.layers.Dense(
+ model.add(tf_keras.layers.Dense(
4, bias_initializer='random_normal', name='foo/bar/dense'))
keras_h5_conversion.save_keras_model(model, path)
return model
def _saveRNNKerasModelForTest(self, path):
- model = tf.keras.Sequential()
- model.use_legacy_config = True
- model.add(tf.keras.layers.Embedding(100, 20, input_shape=[10]))
- model.add(tf.keras.layers.SimpleRNN(4))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Embedding(100, 20, input_shape=[10]))
+ model.add(tf_keras.layers.SimpleRNN(4))
keras_h5_conversion.save_keras_model(model, path)
return model
@@ -77,7 +76,6 @@ def testLoadKerasModelAndWeights(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model2 = keras_tfjs_loader.load_keras_model(
os.path.join(tfjs_path, 'model.json'))
- model2.use_legacy_config = True
# Verify the equality of all the weight values.
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
@@ -100,7 +98,6 @@ def testLoadKerasRNNModelAndWeights(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model2 = keras_tfjs_loader.load_keras_model(
os.path.join(tfjs_path, 'model.json'))
- model2.use_legacy_config = True
# Verify the equality of all the weight values.
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
@@ -129,7 +126,6 @@ def testDeserializeKerasModelTopologyOnlyFromBytesIO(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model2 = keras_tfjs_loader.deserialize_keras_model(buff.read())
- model2.use_legacy_config = True
# The two model JSONs should match exactly.
self.assertEqual(model1.to_json(), model2.to_json())
@@ -148,7 +144,6 @@ def testDeserializeKerasModelTopologyOnlyFromJSONDict(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model2 = keras_tfjs_loader.deserialize_keras_model(config_json)
- model2.use_legacy_config = True
# The two model JSONs should match exactly.
self.assertEqual(model1.to_json(), model2.to_json())
@@ -174,7 +169,6 @@ def testDeserializeKerasModelTopologyAndWeightsFromBuffers(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model2 = keras_tfjs_loader.deserialize_keras_model(
json_buff, weight_data=weight_buffers)
- model2.use_legacy_config = True
# Verify the equality of all the weight values.
model2_weight_values = model2.get_weights()
@@ -204,7 +198,6 @@ def testDeserializeKerasModelTopologyAndWeightsFromFileObjects(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model2 = keras_tfjs_loader.deserialize_keras_model(
json_file, weight_files)
- model2.use_legacy_config = True
# Verify the equality of all the weight values.
model2_weight_values = model2.get_weights()
@@ -230,7 +223,6 @@ def testLoadKerasModelWithCurrentWorkingDirectoryRelativePath(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# Use a relative path under the current working directory.
model2 = keras_tfjs_loader.load_keras_model('model.json')
- model2.use_legacy_config = True
# Verify the equality of all the weight values.
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
@@ -252,7 +244,6 @@ def testLoadKerasModelWithoutWeights(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model2 = keras_tfjs_loader.load_keras_model(
os.path.join(tfjs_path, 'model.json'), load_weights=False)
- model2.use_legacy_config = True
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
for model1_weight_value, model2_weight_value in zip(
@@ -282,7 +273,6 @@ def testLoadKerasModelFromNonDefaultWeightsPathWorks(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
model2 = keras_tfjs_loader.load_keras_model(
new_model_json_path, weights_path_prefix=tfjs_path)
- model2.use_legacy_config = True
# Verify the equality of all the weight values.
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
@@ -335,7 +325,6 @@ def testLoadKerasModelFromDataBuffers(self):
model2 = keras_tfjs_loader.load_keras_model(
os.path.join(tfjs_path, 'model.json'),
weights_data_buffers=data_buffers)
- model2.use_legacy_config = True
# Verify the equality of all the weight values.
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
@@ -347,12 +336,12 @@ def testLoadKerasModelFromDataBuffers(self):
def testLoadNestedKerasModel(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- inner_model = tf.keras.Sequential([
- tf.keras.layers.Dense(4, input_shape=[3], activation='relu'),
- tf.keras.layers.Dense(3, activation='tanh')])
- outer_model = tf.keras.Sequential()
+ inner_model = tf_keras.Sequential([
+ tf_keras.layers.Dense(4, input_shape=[3], activation='relu'),
+ tf_keras.layers.Dense(3, activation='tanh')])
+ outer_model = tf_keras.Sequential()
outer_model.add(inner_model)
- outer_model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ outer_model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
x = np.ones([1, 3], dtype=np.float32)
predict_out = outer_model.predict(x)
@@ -367,12 +356,12 @@ def testLoadNestedKerasModel(self):
def testLoadNestedTfKerasModel(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- inner_model = tf.keras.Sequential([
- tf.keras.layers.Dense(4, input_shape=[3], activation='relu'),
- tf.keras.layers.Dense(3, activation='tanh')])
- outer_model = tf.keras.Sequential()
+ inner_model = tf_keras.Sequential([
+ tf_keras.layers.Dense(4, input_shape=[3], activation='relu'),
+ tf_keras.layers.Dense(3, activation='tanh')])
+ outer_model = tf_keras.Sequential()
outer_model.add(inner_model)
- outer_model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ outer_model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
outer_model.compile(loss='binary_crossentropy', optimizer='sgd')
x = np.ones([1, 3], dtype=np.float32)
@@ -425,18 +414,18 @@ def testInvalidJSONRaisesError(self):
def testLoadFunctionalKerasModel(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- input1 = tf.keras.Input([4])
- x1 = tf.keras.layers.Dense(2, activation='relu')(input1)
- x1 = tf.keras.layers.BatchNormalization()(x1)
+ input1 = tf_keras.Input([4])
+ x1 = tf_keras.layers.Dense(2, activation='relu')(input1)
+ x1 = tf_keras.layers.BatchNormalization()(x1)
- input2 = tf.keras.Input([10])
- x2 = tf.keras.layers.Dense(5, activation='relu')(input2)
- x2 = tf.keras.layers.BatchNormalization()(x2)
+ input2 = tf_keras.Input([10])
+ x2 = tf_keras.layers.Dense(5, activation='relu')(input2)
+ x2 = tf_keras.layers.BatchNormalization()(x2)
- y = tf.keras.layers.Concatenate()([x1, x2])
- y = tf.keras.layers.Dense(1, activation='sigmoid')(y)
+ y = tf_keras.layers.Concatenate()([x1, x2])
+ y = tf_keras.layers.Dense(1, activation='sigmoid')(y)
- model = tf.keras.Model([input1, input2], y)
+ model = tf_keras.Model([input1, input2], y)
model.compile(loss='binary_crossentropy', optimizer='sgd')
input1_val = np.ones([1, 4])
@@ -454,18 +443,18 @@ def testLoadFunctionalKerasModel(self):
def testLoadFunctionalTfKerasModel(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
- input1 = tf.keras.Input([4])
- x1 = tf.keras.layers.Dense(2, activation='relu')(input1)
- x1 = tf.keras.layers.BatchNormalization()(x1)
+ input1 = tf_keras.Input([4])
+ x1 = tf_keras.layers.Dense(2, activation='relu')(input1)
+ x1 = tf_keras.layers.BatchNormalization()(x1)
- input2 = tf.keras.Input([10])
- x2 = tf.keras.layers.Dense(5, activation='relu')(input2)
- x2 = tf.keras.layers.BatchNormalization()(x2)
+ input2 = tf_keras.Input([10])
+ x2 = tf_keras.layers.Dense(5, activation='relu')(input2)
+ x2 = tf_keras.layers.BatchNormalization()(x2)
- y = tf.keras.layers.Concatenate()([x1, x2])
- y = tf.keras.layers.Dense(1, activation='sigmoid')(y)
+ y = tf_keras.layers.Concatenate()([x1, x2])
+ y = tf_keras.layers.Dense(1, activation='sigmoid')(y)
- model = tf.keras.Model([input1, input2], y)
+ model = tf_keras.Model([input1, input2], y)
model.compile(loss='binary_crossentropy', optimizer='sgd')
input1_val = np.ones([1, 4])
diff --git a/tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py b/tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
index 8eb000d3d66..d5d97703c38 100644
--- a/tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
+++ b/tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
@@ -24,6 +24,7 @@
import numpy as np
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflow_decision_forests.keras import GradientBoostedTreesModel
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
@@ -152,28 +153,28 @@ def lookup(input):
def _create_saved_model_with_fusable_conv2d(self, use_bias):
"""Test a basic model with fusable conv2d."""
layers = [
- tf.keras.layers.Conv2D(
+ tf_keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=use_bias),
- tf.keras.layers.BatchNormalization(),
- tf.keras.layers.ReLU()
+ tf_keras.layers.BatchNormalization(),
+ tf_keras.layers.ReLU()
]
- model = tf.keras.Sequential(layers)
+ model = tf_keras.Sequential(layers)
model.predict(tf.ones((1, 224, 224, 3)))
- tf.keras.backend.set_learning_phase(0)
+ tf_keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
def _create_saved_model_with_fusable_depthwise_conv2d(self):
"""Test a basic model with fusable depthwise conv2d."""
layers = [
- tf.keras.layers.DepthwiseConv2D(
+ tf_keras.layers.DepthwiseConv2D(
1, use_bias=True,
bias_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.ReLU()
+ tf_keras.layers.ReLU()
]
- model = tf.keras.Sequential(layers)
+ model = tf_keras.Sequential(layers)
model.predict(tf.ones((1, 2, 2, 3)))
- tf.keras.backend.set_learning_phase(0)
+ tf_keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
@@ -217,30 +218,30 @@ def addV2_conv2d(x):
def _create_saved_model_with_prelu(self):
"""Test a basic model with fusable conv2d."""
layers = [
- tf.keras.layers.Conv2D(
+ tf_keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=True,
bias_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.DepthwiseConv2D(
+ tf_keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25)),
+ tf_keras.layers.DepthwiseConv2D(
1, use_bias=True,
bias_initializer=tf.initializers.constant(0.25)),
- tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
+ tf_keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
]
- model = tf.keras.Sequential(layers)
+ model = tf_keras.Sequential(layers)
model.predict(tf.ones((1, 224, 224, 3)))
- tf.keras.backend.set_learning_phase(0)
+ tf_keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
def _create_saved_model_with_unfusable_prelu(self):
"""Test a basic model with unfusable prelu."""
layers = [
- tf.keras.layers.ReLU(),
- tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
+ tf_keras.layers.ReLU(),
+ tf_keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
]
- model = tf.keras.Sequential(layers)
+ model = tf_keras.Sequential(layers)
model.predict(tf.ones((1, 224, 3)))
- tf.keras.backend.set_learning_phase(0)
+ tf_keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
@@ -343,16 +344,16 @@ def exported_function(x):
def _create_saved_model_with_structured_outputs(self):
def create_input(name):
- return tf.keras.layers.Input(name=name, shape=(1,), dtype=tf.float32)
+ return tf_keras.layers.Input(name=name, shape=(1,), dtype=tf.float32)
input1 = create_input("input1")
input3 = create_input("input3")
input2 = create_input("input2")
- output1 = tf.keras.layers.Dense(1, name='a')
- output1 = output1(tf.keras.layers.concatenate([input1, input3], axis=1))
- output2 = tf.keras.layers.Dense(1, name='b')(input2)
- output3 = tf.keras.layers.Multiply(name='c')([output1, output2])
+ output1 = tf_keras.layers.Dense(1, name='a')
+ output1 = output1(tf_keras.layers.concatenate([input1, input3], axis=1))
+ output2 = tf_keras.layers.Dense(1, name='b')(input2)
+ output3 = tf_keras.layers.Multiply(name='c')([output1, output2])
inputs = {
"input1": input1,
@@ -366,25 +367,10 @@ def create_input(name):
"b": output2
}
- model = tf.keras.Model(inputs=inputs, outputs=outputs)
+ model = tf_keras.Model(inputs=inputs, outputs=outputs)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
- def _create_hub_module(self):
- # Module function that doubles its input.
- def double_module_fn():
- w = tf.Variable([2.0, 4.0])
- x = tf.compat.v1.placeholder(dtype=tf.float32)
- hub.add_signature(inputs=x, outputs=x*w)
- graph = tf.Graph()
- with graph.as_default():
- spec = hub.create_module_spec(double_module_fn)
- m = hub.Module(spec)
- # Export the module.
- with tf.compat.v1.Session(graph=graph) as sess:
- sess.run(tf.compat.v1.global_variables_initializer())
- m.export(os.path.join(self._tmp_dir, HUB_MODULE_DIR), sess)
-
def create_frozen_model(self):
graph = tf.Graph()
saved_model_dir = os.path.join(self._tmp_dir, FROZEN_MODEL_DIR)
@@ -1240,75 +1226,6 @@ def test_convert_saved_model_structured_outputs_false(self):
model_json = json.load(f)
self.assertIs(model_json.get('userDefinedMetadata'), None)
- def test_convert_hub_module_v1(self):
- self._create_hub_module()
- module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
- tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
-
- tf_saved_model_conversion_v2.convert_tf_hub_module(module_path, tfjs_path)
-
- # Check model.json and weights manifest.
- with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
- model_json = json.load(f)
- self.assertTrue(model_json['modelTopology'])
- self.assertIsNot(model_json['modelTopology']['versions'], None)
- signature = model_json['signature']
- self.assertIsNot(signature, None)
- self.assertIsNot(signature['inputs'], None)
- self.assertIsNot(signature['outputs'], None)
-
- weights_manifest = model_json['weightsManifest']
- self.assertCountEqual(weights_manifest[0]['paths'],
- ['group1-shard1of1.bin'])
- self.assertIn('weights', weights_manifest[0])
-
- self.assertTrue(
- glob.glob(
- os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
-
- def test_convert_hub_module_v1_sharded(self):
- self._create_hub_module()
- module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
- tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
-
- # Do initial conversion without sharding.
- tf_saved_model_conversion_v2.convert_tf_hub_module(module_path, tfjs_path)
- weight_files = glob.glob(os.path.join(tfjs_path, 'group*.bin'))
-
- # Get size of weights in bytes after graph optimizations.
- optimized_total_weight = sum([os.path.getsize(f) for f in weight_files])
-
- # Due to the shard size, there ought to be 3 shards after conversion.
- weight_shard_size_bytes = int(optimized_total_weight * 0.4)
-
- tfjs_path = os.path.join(self._tmp_dir, 'sharded_model')
- # Convert Hub model again with shard argument set.
- tf_saved_model_conversion_v2.convert_tf_hub_module(
- module_path, tfjs_path,
- weight_shard_size_bytes=weight_shard_size_bytes)
-
- weight_files = sorted(glob.glob(os.path.join(tfjs_path, 'group*.bin')))
- self.assertEqual(len(weight_files), 3)
- weight_file_sizes = [os.path.getsize(f) for f in weight_files]
-
- self.assertEqual(sum(weight_file_sizes), optimized_total_weight)
- self.assertEqual(weight_file_sizes[0], weight_file_sizes[1])
- self.assertLess(weight_file_sizes[2], weight_file_sizes[0])
-
- def test_convert_hub_module_v1_with_metadata(self):
- self._create_hub_module()
- module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
- tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
-
- metadata_json = {'a': 1}
- tf_saved_model_conversion_v2.convert_tf_hub_module(
- module_path, tfjs_path, metadata={'key': metadata_json})
-
- # Check model.json and weights manifest.
- with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
- model_json = json.load(f)
- self.assertEqual(metadata_json, model_json['userDefinedMetadata']['key'])
-
def test_convert_hub_module_v2(self):
self._create_saved_model()
module_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
@@ -1399,8 +1316,8 @@ def test_convert_frozen_model_with_metadata(self):
self.assertEqual(metadata_json, model_json['userDefinedMetadata']['key'])
def test_convert_keras_model_to_saved_model(self):
- keras_model = tf.keras.Sequential(
- [tf.keras.layers.Dense(1, input_shape=[2])])
+ keras_model = tf_keras.Sequential(
+ [tf_keras.layers.Dense(1, input_shape=[2])])
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf_saved_model_conversion_v2.convert_keras_model_to_graph_model(
diff --git a/tfjs-converter/python/tensorflowjs/converters/wizard.py b/tfjs-converter/python/tensorflowjs/converters/wizard.py
index 4a376706880..ce0b0b12ecc 100644
--- a/tfjs-converter/python/tensorflowjs/converters/wizard.py
+++ b/tfjs-converter/python/tensorflowjs/converters/wizard.py
@@ -83,7 +83,7 @@ def detect_saved_model(input_path):
saved_model = loader_impl.parse_saved_model(input_path)
graph_def = saved_model.meta_graphs[0].object_graph_def
if graph_def.nodes:
- if 'tf_keras' in graph_def.nodes[0].user_object.identifier:
+ if any(x in graph_def.nodes[0].user_object.identifier for x in ['tf.keras', 'tf_keras']):
return common.KERAS_SAVED_MODEL
return common.TF_SAVED_MODEL
diff --git a/tfjs-converter/python/tensorflowjs/converters/wizard_test.py b/tfjs-converter/python/tensorflowjs/converters/wizard_test.py
index dce9592307f..05a67a038d7 100644
--- a/tfjs-converter/python/tensorflowjs/converters/wizard_test.py
+++ b/tfjs-converter/python/tensorflowjs/converters/wizard_test.py
@@ -22,6 +22,7 @@
import os
import shutil
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflow.python.eager import def_function
from tensorflow.python.ops import variables
from tensorflow.python.trackable import autotrackable
@@ -52,24 +53,24 @@ def _create_layers_model(self):
json.dump(data, model_file)
def _create_hd5_file(self):
- input_tensor = tf.keras.layers.Input((3,))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3,))
+ dense1 = tf_keras.layers.Dense(
4, use_bias=True, kernel_initializer='ones', bias_initializer='zeros',
name='MyDense10')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2, use_bias=False, kernel_initializer='ones', name='MyDense20')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
h5_path = os.path.join(self._tmp_dir, HD5_FILE_NAME)
print(h5_path)
model.save_weights(h5_path)
def _create_keras_saved_model(self):
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Reshape([2, 3], input_shape=[6]))
- model.add(tf.keras.layers.LSTM(10))
- model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Reshape([2, 3], input_shape=[6]))
+ model.add(tf_keras.layers.LSTM(10))
+ model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
- tf.keras.models.save_model(model, save_dir)
+ tf_keras.models.save_model(model, save_dir)
def _create_saved_model(self):
"""Test a basic model with functions to make sure functions are inlined."""
diff --git a/tfjs-converter/python/test_nightly_pip_package.py b/tfjs-converter/python/test_nightly_pip_package.py
index adb4ec9c542..bfe64678e09 100644
--- a/tfjs-converter/python/test_nightly_pip_package.py
+++ b/tfjs-converter/python/test_nightly_pip_package.py
@@ -25,6 +25,7 @@
import tempfile
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflow.python.saved_model.save import save
class APIAndShellTest(tf.test.TestCase):
@@ -70,7 +71,7 @@ def testConvertTfHubMobileNetV2ToTfjsGraphModel(self):
def testConvertMobileNetV2ModelToTfjsGraphModel(self):
"""create the keras mobilenet v2 model."""
# 1. Create a saved model from keras mobilenet v2.
- model = tf.keras.applications.MobileNetV2()
+ model = tf_keras.applications.MobileNetV2()
save_dir = os.path.join(self._tmp_dir, 'mobilenetv2')
save(model, save_dir)
@@ -93,7 +94,7 @@ def testConvertMobileNetV2ModelToTfjsGraphModel(self):
def testConvertMobileNetV2Hdf5ToTfjsGraphModel(self):
# 1. Create a model for testing.
- model = tf.keras.applications.MobileNetV2()
+ model = tf_keras.applications.MobileNetV2()
h5_path = os.path.join(self._tmp_dir, 'model.h5')
model.save(h5_path)
diff --git a/tfjs-converter/python/test_pip_package.py b/tfjs-converter/python/test_pip_package.py
index 313323ab2cb..bf76d0a14c3 100644
--- a/tfjs-converter/python/test_pip_package.py
+++ b/tfjs-converter/python/test_pip_package.py
@@ -28,6 +28,7 @@
import numpy as np
import tensorflow.compat.v2 as tf
+import tf_keras
from tensorflow.compat.v1 import saved_model
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
@@ -50,21 +51,21 @@ def _createKerasModel(layer_name_prefix, h5_path=None):
in.
Returns:
- An instance of tf.keras.Model.
+ An instance of tf_keras.Model.
"""
- input_tensor = tf.keras.layers.Input((3, ))
- dense1 = tf.keras.layers.Dense(
+ input_tensor = tf_keras.layers.Input((3, ))
+ dense1 = tf_keras.layers.Dense(
4,
use_bias=True,
kernel_initializer='ones',
bias_initializer='zeros',
name=layer_name_prefix + '1')(input_tensor)
- output = tf.keras.layers.Dense(
+ output = tf_keras.layers.Dense(
2,
use_bias=False,
kernel_initializer='ones',
name=layer_name_prefix + '2')(dense1)
- model = tf.keras.models.Model(inputs=[input_tensor], outputs=[output])
+ model = tf_keras.models.Model(inputs=[input_tensor], outputs=[output])
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict(tf.ones((1, 3)), steps=1)
@@ -616,7 +617,7 @@ def testConvertTensorflowjsArtifactsToKerasH5(self):
# 4. Load the model back from the new HDF5 file and compare with the
# original model.
with tf.Graph().as_default(), tf.compat.v1.Session():
- model_2 = tf.keras.models.load_model(new_h5_path)
+ model_2 = tf_keras.models.load_model(new_h5_path)
model_2_json = model_2.to_json()
self.assertEqual(model_json, model_2_json)
@@ -636,7 +637,7 @@ def testLoadTensorflowjsArtifactsAsKerasModel(self):
process.communicate()
self.assertEqual(0, process.returncode)
- # 3. Load the tensorflowjs artifacts as a tf.keras.Model instance.
+ # 3. Load the tensorflowjs artifacts as a tf_keras.Model instance.
with tf.Graph().as_default(), tf.compat.v1.Session():
model_2 = tfjs.converters.load_keras_model(
os.path.join(self._tmp_dir, 'model.json'))
@@ -677,28 +678,28 @@ def tearDown(self):
super(ConvertTfKerasSavedModelTest, self).tearDown()
def _createSimpleSequentialModel(self):
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Reshape([2, 3], input_shape=[6]))
- model.add(tf.keras.layers.LSTM(10))
- model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Reshape([2, 3], input_shape=[6]))
+ model.add(tf_keras.layers.LSTM(10))
+ model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict(tf.ones((1, 6)), steps=1)
return model
def _createNestedSequentialModel(self):
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Dense(6, input_shape=[10], activation='relu'))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Dense(6, input_shape=[10], activation='relu'))
model.add(self._createSimpleSequentialModel())
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict(tf.ones((1, 10)), steps=1)
return model
def _createFunctionalModelWithWeights(self):
- input1 = tf.keras.Input(shape=[8])
- input2 = tf.keras.Input(shape=[10])
- y = tf.keras.layers.Concatenate()([input1, input2])
- y = tf.keras.layers.Dense(4, activation='softmax')(y)
- model = tf.keras.Model([input1, input2], y)
+ input1 = tf_keras.Input(shape=[8])
+ input2 = tf_keras.Input(shape=[10])
+ y = tf_keras.layers.Concatenate()([input1, input2])
+ y = tf_keras.layers.Dense(4, activation='softmax')(y)
+ model = tf_keras.Model([input1, input2], y)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict([tf.ones((1, 8)), tf.ones((1, 10))], steps=1)
return model
@@ -712,7 +713,7 @@ def testConvertTfKerasNestedSequentialSavedModelIntoTfjsFormat(self):
model = self._createNestedSequentialModel()
y = model.predict(x)
- tf.keras.models.save_model(model, self._tmp_dir)
+ tf_keras.models.save_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -739,7 +740,7 @@ def testConvertTfKerasNestedSequentialSavedModelIntoTfjsFormat(self):
# 4. Load the model back and assert on the equality of the predict
# results.
- model_prime = tf.keras.models.load_model(new_h5_path)
+ model_prime = tf_keras.models.load_model(new_h5_path)
new_y = model_prime.predict(x)
self.assertAllClose(y, new_y)
@@ -753,7 +754,7 @@ def testConvertTfKerasFunctionalSavedModelIntoTfjsFormat(self):
model = self._createFunctionalModelWithWeights()
y = model.predict([x1, x2])
- tf.keras.models.save_model(model, self._tmp_dir)
+ tf_keras.models.save_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -781,7 +782,7 @@ def testConvertTfKerasFunctionalSavedModelIntoTfjsFormat(self):
# 4. Load the model back and assert on the equality of the predict
# results.
- model_prime = tf.keras.models.load_model(new_h5_path)
+ model_prime = tf_keras.models.load_model(new_h5_path)
new_y = model_prime.predict([x1, x2])
self.assertAllClose(y, new_y)
@@ -790,7 +791,7 @@ def testUsingIncorrectKerasSavedModelRaisesError(self):
# 1. Run the model.predict(), store the result. Then saved the model
# as a SavedModel.
model = self._createNestedSequentialModel()
- tf.keras.models.save_model(model, self._tmp_dir)
+ tf_keras.models.save_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -819,7 +820,7 @@ def testConvertTfjsLayersModelIntoShardedWeights(self):
weights = model.get_weights()
total_weight_bytes = sum(np.size(w) for w in weights) * 4
- tf.keras.models.save_model(model, self._tmp_dir)
+ tf_keras.models.save_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -867,7 +868,7 @@ def testConvertTfjsLayersModelIntoShardedWeights(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# 6. Load the keras model and check the predict() output is close to
# before.
- new_model = tf.keras.models.load_model(new_h5_path)
+ new_model = tf_keras.models.load_model(new_h5_path)
new_y = new_model.predict(x)
self.assertAllClose(new_y, y)
@@ -879,7 +880,7 @@ def testConvertTfjsLayersModelWithLegacyQuantization(self):
weights = model.get_weights()
total_weight_bytes = sum(np.size(w) for w in weights) * 4
- tf.keras.models.save_model(model, self._tmp_dir)
+ tf_keras.models.save_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -920,7 +921,7 @@ def testConvertTfjsLayersModelWithQuantization(self):
weights = model.get_weights()
total_weight_bytes = sum(np.size(w) for w in weights) * 4
- tf.keras.models.save_model(model, self._tmp_dir)
+ tf_keras.models.save_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
@@ -955,9 +956,9 @@ def testConvertTfjsLayersModelWithQuantization(self):
def testConvertTfjsLayersModelToTfjsGraphModel(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# 1. Create a model for testing.
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Dense(10, activation='relu', input_shape=[4]))
- model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Dense(10, activation='relu', input_shape=[4]))
+ model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict(tf.ones((1, 4)), steps=1)
@@ -993,9 +994,9 @@ def testConvertTfjsLayersModelToTfjsGraphModel(self):
def testConvertTfjsLayersModelToKerasSavedModel(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# 1. Create a model for testing.
- model = tf.keras.Sequential()
- model.add(tf.keras.layers.Dense(10, activation='relu', input_shape=[4]))
- model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
+ model = tf_keras.Sequential()
+ model.add(tf_keras.layers.Dense(10, activation='relu', input_shape=[4]))
+ model.add(tf_keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict(tf.ones((1, 4)), steps=1)
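The edits above all apply one mechanical migration: every `tf.keras.*` reference becomes a reference to the standalone `tf_keras` package. The pattern in miniature, as a sketch (layer sizes arbitrary, mirroring the test models in this patch):

```python
# Before: Keras accessed through the TensorFlow namespace.
# import tensorflow.compat.v2 as tf
# model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[2])])

# After: the standalone tf_keras package, as used throughout this patch.
import tf_keras

model = tf_keras.Sequential([tf_keras.layers.Dense(1, input_shape=[2])])
model.compile(optimizer='adam', loss='binary_crossentropy')
```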
From 3fbd65e2f24f0eb2a1dc0652a88e30373a4158f3 Mon Sep 17 00:00:00 2001
From: Branchverse
Date: Fri, 12 Apr 2024 19:51:09 +0200
Subject: [PATCH 04/33] Fixing typos in READMEs (#8213)
* fix: typo in README.md
* Update README.md
---
tfjs-backend-wasm/README.md | 2 +-
tfjs-tflite/README.md | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/tfjs-backend-wasm/README.md b/tfjs-backend-wasm/README.md
index 59397cfb83a..322a80fa5a6 100644
--- a/tfjs-backend-wasm/README.md
+++ b/tfjs-backend-wasm/README.md
@@ -47,7 +47,7 @@ tf.setBackend('wasm').then(() => main());
Starting from Chrome 92 (to be released around July 2021), **cross-origin
isolation** needs to be set up in your site in order to take advantage of
the multi-threading support in WASM backend. Without this, the backend
-will fallback to the WASM binary with SIMD-only support (or the vanila version
+will fall back to the WASM binary with SIMD-only support (or the vanilla version
if SIMD is not enabled). Without multi-threading support, certain models might
not achieve the best performance.
diff --git a/tfjs-tflite/README.md b/tfjs-tflite/README.md
index 391c00b2806..9098768ebc5 100644
--- a/tfjs-tflite/README.md
+++ b/tfjs-tflite/README.md
@@ -88,7 +88,7 @@ enabled by default. In older versions of Chrome, they can be enabled in
Starting from Chrome 92, **cross-origin isolation** needs to be set up in your
site in order to take advantage of the multi-threading support. Without this, it
-will fallback to the WASM binary with SIMD-only support (or the vanila version
+will fall back to the WASM binary with SIMD-only support (or the vanilla version
if SIMD is not enabled). Without multi-threading support, certain models might
not achieve the best performance. See [here][cross origin setup steps] for the
high-level steps to set up the cross-origin isolation.
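Concretely, cross-origin isolation means serving the page with the `Cross-Origin-Opener-Policy: same-origin` and `Cross-Origin-Embedder-Policy: require-corp` response headers. A minimal local-development sketch, assuming a static directory and an arbitrary port (production servers would set the same headers in their own configuration):

```python
# Sketch: a static file server that sends the two headers required for
# cross-origin isolation, which unlocks SharedArrayBuffer and therefore
# multi-threaded WASM.
from http.server import HTTPServer, SimpleHTTPRequestHandler


class CrossOriginIsolatedHandler(SimpleHTTPRequestHandler):
    def end_headers(self):
        self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
        self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
        super().end_headers()


if __name__ == '__main__':
    HTTPServer(('localhost', 8080), CrossOriginIsolatedHandler).serve_forever()
```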
From baf2364168b39ca4dae85f80ee5258e2c33c4589 Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Fri, 12 Apr 2024 23:22:06 +0530
Subject: [PATCH 05/33] Address tfjs-automl typos in documentation strings
(#8219)
---
tfjs-automl/README.md | 8 ++++----
tfjs-automl/src/test_browser.ts | 2 +-
tfjs-automl/src/test_node.ts | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/tfjs-automl/README.md b/tfjs-automl/README.md
index 6dda1584419..1d3732713f9 100644
--- a/tfjs-automl/README.md
+++ b/tfjs-automl/README.md
@@ -48,10 +48,10 @@ const modelUrl = 'model.json'; // URL to the model.json file.
const model = await automl.loadImageClassification(modelUrl);
```
-If you do not want (or cannot) load the model over HTTP you can also load the model separately and directly use the constuctor.
+If you do not want (or cannot) load the model over HTTP you can also load the model separately and directly use the constructor.
This is particularly relevant for __non-browser__ platforms.
-The following psuedocode demonstrates this approach:
+The following pseudocode demonstrates this approach:
```js
import * as automl from '@tensorflow/tfjs-automl';
@@ -138,10 +138,10 @@ const modelUrl = 'model.json'; // URL to the model.json file.
const model = await automl.loadObjectDetection(modelUrl);
```
-If you do not want (or cannot) load the model over HTTP you can also load the model separately and directly use the constuctor.
+If you do not want (or cannot) load the model over HTTP you can also load the model separately and directly use the constructor.
This is particularly relevant for __non-browser__ platforms.
-The following psuedocode demonstrates this approach:
+The following pseudocode demonstrates this approach:
```js
import * as automl from '@tensorflow/tfjs-automl';
diff --git a/tfjs-automl/src/test_browser.ts b/tfjs-automl/src/test_browser.ts
index 989bab4471a..44a65dbfb62 100644
--- a/tfjs-automl/src/test_browser.ts
+++ b/tfjs-automl/src/test_browser.ts
@@ -39,7 +39,7 @@ const testEnv = parseTestEnvFromKarmaFlags(__karma__.config.args, TEST_ENVS);
if (testEnv != null) {
setTestEnvs([testEnv]);
} else {
- // Run browser tests againts both the webgl backends.
+ // Run browser tests against both the webgl backends.
setTestEnvs([
// WebGL.
{
diff --git a/tfjs-automl/src/test_node.ts b/tfjs-automl/src/test_node.ts
index ee60399ea37..4d7b4208582 100644
--- a/tfjs-automl/src/test_node.ts
+++ b/tfjs-automl/src/test_node.ts
@@ -28,7 +28,7 @@ process.on('unhandledRejection', e => {
throw e;
});
-// Run node tests againts the cpu backend.
+// Run node tests against the cpu backend.
setTestEnvs([{name: 'node', backendName: 'cpu'}]);
const runner = new jasmine();
From c027d6a51d826977c7c7dcc47b3a4274044c842f Mon Sep 17 00:00:00 2001
From: Architect
Date: Fri, 12 Apr 2024 15:18:22 -0500
Subject: [PATCH 06/33] Add support for GELU and approximate activation
functions (#8224)
FEATURE
* add docker configs for isolated testing
* implement gelu and gelu_new as separate activations
* Update activations.ts
* Update activations_test.ts
* Update activations_test.ts
* remove docker files
* fix activation tests
* fix lint errors
* remove extra blank line
* fix gelu_new calc
* fix 1D test
---------
Co-authored-by: Ping Yu <4018+pyu10055@users.noreply.github.com>
Co-authored-by: Matthew Soulanille
---
tfjs-layers/src/activations.ts | 74 ++++++++++++--
tfjs-layers/src/activations_test.ts | 97 ++++++++++++++++++-
.../src/keras_format/activation_config.ts | 4 +-
.../layers/nlp/models/gpt2/gpt2_backbone.ts | 3 +-
4 files changed, 166 insertions(+), 12 deletions(-)
diff --git a/tfjs-layers/src/activations.ts b/tfjs-layers/src/activations.ts
index b5758ae2733..12849a47956 100644
--- a/tfjs-layers/src/activations.ts
+++ b/tfjs-layers/src/activations.ts
@@ -209,23 +209,62 @@ export class LogSoftmax extends Activation {
serialization.registerClass(LogSoftmax);
/**
- * Swish activation function
+ * Gelu activation function
*/
-export class Swish extends Activation {
+export class Gelu extends Activation {
/** @nocollapse */
- static readonly className = 'swish';
+ static readonly className = 'gelu';
/**
* Calculate the activation function.
*
* @param x Tensor.
- * @param alpha Scaling factor for the sigmoid function.
* @returns a Tensor of the same shape as x
*/
- apply(x: Tensor, alpha = 1): Tensor {
- return tidy(() => tfc.mul(tfc.sigmoid(tfc.mul(x, alpha)), x));
+ apply(x: Tensor): Tensor {
+    return tidy(() => {
+      const sqrtTwo = Math.sqrt(2);
+      // Compute Φ(x) using the erf function
+      const cdf = tfc.mul(0.5, tfc.add(1, tfc.erf(tfc.div(x, sqrtTwo))));
+      // Compute GELU(x) = x * Φ(x)
+      return tfc.mul(x, cdf);
+    });
}
}
-serialization.registerClass(Swish);
+serialization.registerClass(Gelu);
+
+/**
+ * GeluNew activation function
+ */
+export class GeluNew extends Activation {
+ /** @nocollapse */
+ static readonly className = 'gelu_new';
+ /**
+ * Calculate the activation function.
+ *
+ * @param x Tensor.
+ * @returns a Tensor of the same shape as x
+ */
+ apply(x: Tensor): Tensor {
+ return tidy(() => {
+ return tfc.mul(
+ 0.5,
+ tfc.mul(
+ x,
+ tfc.add(
+ 1,
+ tfc.tanh(
+ tfc.mul(
+ tfc.sqrt(tfc.div(2, Math.PI)),
+ tfc.add(x, tfc.mul(0.044715, tfc.pow(x, 3)))
+ )
+ )
+ )
+ )
+ );
+ });
+ }
+}
+serialization.registerClass(GeluNew);
/**
* Mish activation function
@@ -245,6 +286,25 @@ export class Mish extends Activation {
}
serialization.registerClass(Mish);
+/**
+ * Swish activation function
+ */
+export class Swish extends Activation {
+ /** @nocollapse */
+ static readonly className = 'swish';
+ /**
+ * Calculate the activation function.
+ *
+ * @param x Tensor.
+ * @param alpha Scaling factor for the sigmoid function.
+ * @returns a Tensor of the same shape as x
+ */
+ apply(x: Tensor, alpha = 1): Tensor {
+ return tidy(() => tfc.mul(tfc.sigmoid(tfc.mul(x, alpha)), x));
+ }
+}
+serialization.registerClass(Swish);
+
export function serializeActivation(activation: Activation): string {
return activation.getClassName();
}
diff --git a/tfjs-layers/src/activations_test.ts b/tfjs-layers/src/activations_test.ts
index bc4d6289812..3ee38dc5d01 100644
--- a/tfjs-layers/src/activations_test.ts
+++ b/tfjs-layers/src/activations_test.ts
@@ -13,7 +13,7 @@
*/
import {scalar, tensor1d, tensor2d, tensor3d} from '@tensorflow/tfjs-core';
-import {Elu, HardSigmoid, Linear, LogSoftmax, Relu, Relu6, Selu, Sigmoid, Softmax, Softplus, Softsign, Tanh, Swish, Mish} from './activations';
+import {Elu, HardSigmoid, Linear, LogSoftmax, Relu, Relu6, Selu, Sigmoid, Softmax, Softplus, Softsign, Tanh, Swish, Mish, Gelu, GeluNew} from './activations';
import {describeMathCPUAndGPU, expectNoLeakedTensors, expectTensorsClose} from './utils/test_utils';
describeMathCPUAndGPU('linear activation', () => {
@@ -366,3 +366,98 @@ describeMathCPUAndGPU('mish activation', () => {
expectNoLeakedTensors(() => mish(initX), 1);
});
});
+
+describeMathCPUAndGPU('gelu activation', () => {
+ const gelu = new Gelu().apply;
+ // Setup: Array with initial values.
+ // Execute: Gelu on the last dimension.
+ // Expect: Output array matches size and approximate expected values.
+ it('1D', () => {
+ const initX = tensor1d([0, 1, 3, 9]);
+ const expectedVals = tensor1d([
+ 0,
+ 0.8413447141647339,
+ 2.995950222015381, 9
+ ]);
+ expectTensorsClose(gelu(initX), expectedVals);
+ });
+ it('1D all equal', () => {
+ const initX = tensor1d([-1, -1, -1, -1]);
+ const expectedVals = tensor1d([
+ -0.15865525603294373,
+ -0.15865525603294373,
+ -0.15865525603294373,
+ -0.15865525603294373
+ ]);
+ expectTensorsClose(gelu(initX), expectedVals);
+ });
+ it('2D', () => {
+ const initX = tensor2d([[0, 1, 3, 9], [0, 1, 3, 9]]);
+ const expectedVals = tensor2d([
+ [0, 0.8413447141647339, 2.995950222015381, 9],
+ [0, 0.8413447141647339, 2.995950222015381, 9]
+ ]);
+ expectTensorsClose(gelu(initX), expectedVals);
+ });
+ it('3D', () => {
+ const initX = tensor3d([[[0, 1, 3, 9], [0, 1, 3, 9]]]);
+ const expectedVals = tensor3d([[
+ [ 0, 0.8413447141647339, 2.995950222015381, 9 ],
+ [ 0, 0.8413447141647339, 2.995950222015381, 9 ]
+ ]]);
+ expectTensorsClose(gelu(initX), expectedVals);
+ });
+ it('Does not leak', () => {
+ const initX = tensor1d([0, 1, 3, 9]);
+ expectNoLeakedTensors(() => gelu(initX), 1);
+ });
+});
+
+describeMathCPUAndGPU('gelu_new activation', () => {
+ const geluNew = new GeluNew().apply;
+ // Setup: Array with initial values.
+ // Execute: GeluNew on the last dimension.
+ // Expect: Output array matches size and approximate expected values.
+ it('1D', () => {
+ const initX = tensor1d([0, 1, 3, 9]);
+ const expectedVals = tensor1d([
+ 0,
+ 0.8411920070648193,
+ 2.9963626861572266,
+ 9
+ ]);
+ expectTensorsClose(geluNew(initX), expectedVals);
+ });
+ it('1D all equal', () => {
+ const initX = tensor1d([-1, -1, -1, -1]);
+ const expectedVals = tensor1d([
+ -0.15880802273750305,
+ -0.15880802273750305,
+ -0.15880802273750305,
+ -0.15880802273750305
+ ]);
+ expectTensorsClose(geluNew(initX), expectedVals);
+ });
+ it('2D', () => {
+ const initX = tensor2d([[0, 1, 3, 9], [0, 1, 3, 9]]);
+ const expectedVals = tensor2d([
+ [ 0, 0.8411920070648193, 2.9963626861572266, 9 ],
+ [ 0, 0.8411920070648193, 2.9963626861572266, 9 ]
+ ]);
+ expectTensorsClose(geluNew(initX), expectedVals);
+ });
+ it('3D', () => {
+ const initX = tensor3d([[[0, 1, 3, 9], [0, 1, 3, 9]]]);
+ const expectedVals = tensor3d([
+ [
+ [ 0, 0.8411920070648193, 2.9963626861572266, 9 ],
+ [ 0, 0.8411920070648193, 2.9963626861572266, 9 ]
+ ]
+ ]);
+ expectTensorsClose(geluNew(initX), expectedVals);
+ });
+ it('Does not leak', () => {
+ const initX = tensor1d([0, 1, 3, 9]);
+ expectNoLeakedTensors(() => geluNew(initX), 1);
+ });
+});
diff --git a/tfjs-layers/src/keras_format/activation_config.ts b/tfjs-layers/src/keras_format/activation_config.ts
index 791d622f1dd..92c4cc55de2 100644
--- a/tfjs-layers/src/keras_format/activation_config.ts
+++ b/tfjs-layers/src/keras_format/activation_config.ts
@@ -15,7 +15,7 @@ import {stringLiteralArray} from './utils';
*/
export const activationOptions = stringLiteralArray([
'elu', 'hard_sigmoid', 'linear', 'relu', 'relu6', 'selu', 'sigmoid',
- 'softmax', 'softplus', 'softsign', 'tanh', 'swish', 'mish'
+ 'softmax', 'softplus', 'softsign', 'tanh', 'swish', 'mish', 'gelu', 'gelu_new'
]);
/**
@@ -28,4 +28,4 @@ export type ActivationSerialization = typeof activationOptions[number];
// e.g. to src/common.ts. Maybe even duplicate *all* of these to be pedantic?
/** @docinline */
export type ActivationIdentifier = 'elu'|'hardSigmoid'|'linear'|'relu'|'relu6'|
- 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish';
+ 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new';
diff --git a/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_backbone.ts b/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_backbone.ts
index 971f8868cc6..a90158fd17a 100644
--- a/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_backbone.ts
+++ b/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_backbone.ts
@@ -170,8 +170,7 @@ export class GPT2Backbone extends Backbone {
numHeads: args.numHeads,
dropout: args.dropout,
layerNormEpsilon: 1e-05,
- // TODO(pforderique): Implement gelu.
- activation: getActivation('relu'),
+ activation: getActivation('gelu'),
kernelInitializer: gpt2KernelInitializer(0.02),
normalizeFirst: true,
name: `transformer_layer_${i}`,
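The two activations added in this patch implement, respectively, the exact GELU, `GELU(x) = x * Φ(x)` with `Φ` computed via `erf`, and the GPT-2 tanh approximation `gelu_new(x) = 0.5 * x * (1 + tanh(sqrt(2/π) * (x + 0.044715 * x^3)))`. A stdlib-only sketch that reproduces the expected values in the unit tests above:

```python
# Sketch of the formulas behind the Gelu and GeluNew classes in this patch,
# checked against the tests' expected values.
import math


def gelu(x: float) -> float:
    # Exact GELU: x * Phi(x), with the normal CDF computed via erf.
    return x * 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))


def gelu_new(x: float) -> float:
    # GPT-2's tanh approximation of GELU.
    return 0.5 * x * (1.0 + math.tanh(
        math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))


for v in [0.0, 1.0, 3.0, 9.0]:
    print(v, gelu(v), gelu_new(v))
# gelu(1.0)     ~= 0.8413447  (matches the 'gelu activation' tests)
# gelu_new(1.0) ~= 0.8411920  (matches the 'gelu_new activation' tests)
```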
From 63250eceec9dab31f10345e77722080b17100bc7 Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Sat, 13 Apr 2024 01:51:11 +0530
Subject: [PATCH 07/33] Address tfjs-react-native typos in documentation
strings (#8217)
---
tfjs-react-native/DEVELOPMENT.md | 13 +-
.../integration_rn59/components/ml.ts | 69 +-
.../components/tfjs_unit_test_runner.tsx | 2 +-
.../src/bundle_resource_io_test.ts | 302 +++---
.../src/camera/camera_stream.tsx | 4 +-
tfjs-react-native/src/camera/camera_test.ts | 979 +++++++++---------
.../src/platform_react_native.ts | 222 ++--
7 files changed, 840 insertions(+), 751 deletions(-)
diff --git a/tfjs-react-native/DEVELOPMENT.md b/tfjs-react-native/DEVELOPMENT.md
index a27676e346a..d5ba2f8b93c 100644
--- a/tfjs-react-native/DEVELOPMENT.md
+++ b/tfjs-react-native/DEVELOPMENT.md
@@ -1,11 +1,11 @@
# Development
-This file will document some of the differences from the regular developement workflow in [DEVELOPMENT.md](../DEVELOPMENT.md). You should read that document first to get familiar with typical TensorFlow.js development workflow.
+This file documents some of the differences from the regular development workflow in [DEVELOPMENT.md](../DEVELOPMENT.md). You should read that document first to get familiar with the typical TensorFlow.js development workflow.
Development and testing for tfjs-react-native is somewhat different from the packages like tfjs-core or tfjs-layers for a few reasons:
-- __Dependency on having a physical mobile device to run__: While the CPU backend can run in a simulator, the WebGL one requires running on a physical device. So most of the time you will want to test something using a mobile device connected to your computer.
-- __No browser or node environment__: We are running JavaScript outside of a browser and outside of node. We thus have to make sure we don't include things that depend on those two environments.
+- **Dependency on having a physical mobile device to run**: While the CPU backend can run in a simulator, the WebGL one requires running on a physical device. So most of the time you will want to test something using a mobile device connected to your computer.
+- **No browser or node environment**: We are running JavaScript outside of a browser and outside of node. We thus have to make sure we don't include things that depend on those two environments.
## Key Terms & Caveats
@@ -13,7 +13,7 @@ These are a few key terms/technologies to be familiar with that are different fr
- [React Native](https://facebook.github.io/react-native/) — This is the framework that this package targets.
- [Metro](https://facebook.github.io/metro/) — This is the bundler used to create the JavaScript bundle that is loaded into the native app by react native.
- - The bundle needs to be created at 'compile time' thus all imports/requires need to be resolved. Thus _dynamic_ `import`s/`require`s are __statically resolved__. So you cannot exclude a require with a conditional in JS code.
+ - The bundle needs to be created at 'compile time', so all imports/requires need to be resolved; _dynamic_ `import`s/`require`s are therefore **statically resolved**. So you cannot exclude a require with a conditional in JS code.
- Since tfjs does dynamic `require`'s of certain node libraries that are not present in react native, files that do that need to be excluded from the metro build process. For end users, this is documented in the [README](../README.md), but it also happens in `integration_rn59/prep_tests.ts`.
- Metro does not play well with symlinks, so if you are trying to develop against a local build of tfjs, copy the dist folder into the app's node_modules as appropriate. Do not use yalc.
- [.ipa](https://en.wikipedia.org/wiki/.ipa) & [.apk](https://en.wikipedia.org/wiki/Android_application_package) — These are the formats for the final native bundle that is put on an iOS and Android device. They are created by their respective dev tools, [XCode](https://developer.apple.com/xcode/) and [Android Studio](https://developer.android.com/studio).
@@ -33,8 +33,9 @@ Unit tests from tfjs-core are imported into a react native application and run a
Because these are part of an app to run them you must compile and run the integration_rn59 of the target device. There is a button in that app to start the unit tests.
This is _automated in CI_ and runs on:
- - Changes to tfjs-core: [Tests will be run against HEAD of tfjs-core](../tfjs-core/cloudbuild.yml)
- - Changes to tfjs-react-native: [Tests will be run against the **published** version](./cloudbuild.yml) of tfjs on npm that is references in `integration_rn59/package.json`
+
+- Changes to tfjs-core: [Tests will be run against HEAD of tfjs-core](../tfjs-core/cloudbuild.yml)
+- Changes to tfjs-react-native: [Tests will be run against the **published** version](./cloudbuild.yml) of tfjs on npm that is referenced in `integration_rn59/package.json`
### Other integration tests
diff --git a/tfjs-react-native/integration_rn59/components/ml.ts b/tfjs-react-native/integration_rn59/components/ml.ts
index 7639493fbb3..8af7486f697 100644
--- a/tfjs-react-native/integration_rn59/components/ml.ts
+++ b/tfjs-react-native/integration_rn59/components/ml.ts
@@ -15,9 +15,12 @@
* =============================================================================
*/
-import * as mobilenet from '@tensorflow-models/mobilenet';
-import * as tf from '@tensorflow/tfjs';
-import {asyncStorageIO, bundleResourceIO} from '@tensorflow/tfjs-react-native';
+import * as mobilenet from "@tensorflow-models/mobilenet";
+import * as tf from "@tensorflow/tfjs";
+import {
+ asyncStorageIO,
+ bundleResourceIO,
+} from "@tensorflow/tfjs-react-native";
// All functions (i.e. 'runners") in this file are async
// functions that return a function that can be invoked to
@@ -64,11 +67,12 @@ export async function mobilenetRunner() {
* A runner that loads a model bundled with the app and runs a prediction
* through it.
*/
-const modelJson = require('../assets/model/bundle_model_test.json');
-const modelWeights = require('../assets/model/bundle_model_test_weights.bin');
+const modelJson = require("../assets/model/bundle_model_test.json");
+const modelWeights = require("../assets/model/bundle_model_test_weights.bin");
export async function localModelRunner() {
- const model =
- await tf.loadLayersModel(bundleResourceIO(modelJson, modelWeights));
+ const model = await tf.loadLayersModel(
+ bundleResourceIO(modelJson, modelWeights)
+ );
return async () => {
const res = model.predict(tf.randomNormal([1, 10])) as tf.Tensor;
@@ -81,11 +85,12 @@ export async function localModelRunner() {
* A runner that loads a model bundled with the app and runs a prediction
* through it.
*/
-const modelJson2 = require('../assets/graph_model/model.json');
-const modelWeights2 = require('../assets/graph_model/group1-shard1of1.bin');
+const modelJson2 = require("../assets/graph_model/model.json");
+const modelWeights2 = require("../assets/graph_model/group1-shard1of1.bin");
export async function localGraphModelRunner() {
- const model =
- await tf.loadGraphModel(bundleResourceIO(modelJson2, modelWeights2));
+ const model = await tf.loadGraphModel(
+ bundleResourceIO(modelJson2, modelWeights2)
+ );
return async () => {
const res = model.predict(tf.randomNormal([1, 10])) as tf.Tensor;
const data = await res.data();
@@ -97,33 +102,35 @@ export async function localGraphModelRunner() {
* A runner that loads a sharded model bundled with the app and runs a
* prediction through it.
*/
-const shardedModelJson = require('../assets/sharded_model/model.json');
-const shardedModelWeights1: number =
- require('../assets/sharded_model/group1-shard1of2.bin');
-const shardedModelWeights2: number =
- require('../assets/sharded_model/group1-shard2of2.bin');
+const shardedModelJson = require("../assets/sharded_model/model.json");
+const shardedModelWeights1: number = require("../assets/sharded_model/group1-shard1of2.bin");
+const shardedModelWeights2: number = require("../assets/sharded_model/group1-shard2of2.bin");
export async function localShardedGraphModelRunner() {
- const model = await tf.loadGraphModel(bundleResourceIO(
- shardedModelJson, [shardedModelWeights1, shardedModelWeights2]));
+ const model = await tf.loadGraphModel(
+ bundleResourceIO(shardedModelJson, [
+ shardedModelWeights1,
+ shardedModelWeights2,
+ ])
+ );
return async () => {
const input = tf.zeros([1, 224, 224, 3]);
const res = model.predict(input) as tf.Tensor;
const data = await res.data();
- return JSON.stringify({predictionsLength: data.length});
+ return JSON.stringify({ predictionsLength: data.length });
};
}
/**
- * A runner that traines a model.
+ * A runner that trains a model.
*/
export async function trainModelRunner() {
// Define a model for linear regression.
const model = tf.sequential();
- model.add(tf.layers.dense({units: 5, inputShape: [1]}));
- model.add(tf.layers.dense({units: 1}));
- model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});
+ model.add(tf.layers.dense({ units: 5, inputShape: [1] }));
+ model.add(tf.layers.dense({ units: 1 }));
+ model.compile({ loss: "meanSquaredError", optimizer: "sgd" });
// Generate some synthetic data for training.
const xs = tf.tensor2d([1, 2, 3, 4], [4, 1]);
@@ -131,9 +138,9 @@ export async function trainModelRunner() {
return async () => {
// Train the model using the data.
- await model.fit(xs, ys, {epochs: 20});
+ await model.fit(xs, ys, { epochs: 20 });
- return 'done';
+ return "done";
};
}
@@ -143,14 +150,14 @@ export async function trainModelRunner() {
export async function saveModelRunner() {
// Define a model for linear regression.
const model = tf.sequential();
- model.add(tf.layers.dense({units: 5, inputShape: [1]}));
- model.add(tf.layers.dense({units: 1}));
- model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});
+ model.add(tf.layers.dense({ units: 5, inputShape: [1] }));
+ model.add(tf.layers.dense({ units: 1 }));
+ model.compile({ loss: "meanSquaredError", optimizer: "sgd" });
return async () => {
- await model.save(asyncStorageIO('custom-model-test'));
- await tf.loadLayersModel(asyncStorageIO('custom-model-test'));
+ await model.save(asyncStorageIO("custom-model-test"));
+ await tf.loadLayersModel(asyncStorageIO("custom-model-test"));
- return 'done';
+ return "done";
};
}
diff --git a/tfjs-react-native/integration_rn59/components/tfjs_unit_test_runner.tsx b/tfjs-react-native/integration_rn59/components/tfjs_unit_test_runner.tsx
index c1f2792bcbb..5ec1a022340 100644
--- a/tfjs-react-native/integration_rn59/components/tfjs_unit_test_runner.tsx
+++ b/tfjs-react-native/integration_rn59/components/tfjs_unit_test_runner.tsx
@@ -123,7 +123,7 @@ export class TestRunner extends Component {
const reactReporter: jasmine.CustomReporter = {
jasmineStarted: suiteInfo => {
// The console.warn below seems necessary in order for the spy on
- // console.warn defined in one of the tests to run corrently.
+      // console.warn defined in one of the tests to run correctly.
console.warn('starting tests');
//@ts-ignore
console.reportErrorsAsExceptions = false;
diff --git a/tfjs-react-native/src/bundle_resource_io_test.ts b/tfjs-react-native/src/bundle_resource_io_test.ts
index 2fc8b181def..d6c24b0e7dc 100644
--- a/tfjs-react-native/src/bundle_resource_io_test.ts
+++ b/tfjs-react-native/src/bundle_resource_io_test.ts
@@ -15,91 +15,95 @@
* =============================================================================
*/
-import './platform_react_native';
+import "./platform_react_native";
-import * as tf from '@tensorflow/tfjs-core';
+import * as tf from "@tensorflow/tfjs-core";
// tslint:disable-next-line: no-imports-from-dist
-import {describeWithFlags} from '@tensorflow/tfjs-core/dist/jasmine_util';
+import { describeWithFlags } from "@tensorflow/tfjs-core/dist/jasmine_util";
-import {bundleResourceIO} from './bundle_resource_io';
-import * as tfjsRn from './platform_react_native';
-import {RN_ENVS} from './test_env_registry';
+import { bundleResourceIO } from "./bundle_resource_io";
+import * as tfjsRn from "./platform_react_native";
+import { RN_ENVS } from "./test_env_registry";
-describeWithFlags('BundleResourceIO', RN_ENVS, () => {
+describeWithFlags("BundleResourceIO", RN_ENVS, () => {
// Test data.
const modelTopology1: {} = {
- 'class_name': 'Sequential',
- 'keras_version': '2.1.4',
- 'config': [{
- 'class_name': 'Dense',
- 'config': {
- 'kernel_initializer': {
- 'class_name': 'VarianceScaling',
- 'config': {
- 'distribution': 'uniform',
- 'scale': 1.0,
- 'seed': null,
- 'mode': 'fan_avg'
- }
+ class_name: "Sequential",
+ keras_version: "2.1.4",
+ config: [
+ {
+ class_name: "Dense",
+ config: {
+ kernel_initializer: {
+ class_name: "VarianceScaling",
+ config: {
+ distribution: "uniform",
+ scale: 1.0,
+ seed: null,
+ mode: "fan_avg",
+ },
+ },
+ name: "dense",
+ kernel_constraint: null,
+ bias_regularizer: null,
+ bias_constraint: null,
+ dtype: "float32",
+ activation: "linear",
+ trainable: true,
+ kernel_regularizer: null,
+ bias_initializer: { class_name: "Zeros", config: {} },
+ units: 1,
+ batch_input_shape: [null, 3],
+ use_bias: true,
+ activity_regularizer: null,
},
- 'name': 'dense',
- 'kernel_constraint': null,
- 'bias_regularizer': null,
- 'bias_constraint': null,
- 'dtype': 'float32',
- 'activation': 'linear',
- 'trainable': true,
- 'kernel_regularizer': null,
- 'bias_initializer': {'class_name': 'Zeros', 'config': {}},
- 'units': 1,
- 'batch_input_shape': [null, 3],
- 'use_bias': true,
- 'activity_regularizer': null
- }
- }],
- 'backend': 'tensorflow'
+ },
+ ],
+ backend: "tensorflow",
};
const weightSpecs1: tf.io.WeightsManifestEntry[] = [
{
- name: 'dense/kernel',
+ name: "dense/kernel",
shape: [3, 1],
- dtype: 'float32',
+ dtype: "float32",
},
{
- name: 'dense/bias',
+ name: "dense/bias",
shape: [1],
- dtype: 'float32',
- }
+ dtype: "float32",
+ },
];
const weightData1 = new ArrayBuffer(16);
- it('constructs an IOHandler', async () => {
+ it("constructs an IOHandler", async () => {
const modelJson: tf.io.ModelJSON = {
modelTopology: modelTopology1,
- weightsManifest: [{
- paths: [],
- weights: weightSpecs1,
- }]
-
+ weightsManifest: [
+ {
+ paths: [],
+ weights: weightSpecs1,
+ },
+ ],
};
const resourceId = 1;
const handler = bundleResourceIO(modelJson, resourceId);
- expect(typeof handler.load).toBe('function');
- expect(typeof handler.save).toBe('function');
+ expect(typeof handler.load).toBe("function");
+ expect(typeof handler.save).toBe("function");
});
- it('loads model artifacts', async () => {
+ it("loads model artifacts", async () => {
const response = new Response(weightData1);
- spyOn(tfjsRn, 'fetch').and.returnValue(Promise.resolve(response));
+ spyOn(tfjsRn, "fetch").and.returnValue(Promise.resolve(response));
const modelJson: tf.io.ModelJSON = {
modelTopology: modelTopology1,
- weightsManifest: [{
- paths: [],
- weights: weightSpecs1,
- }]
-
+ weightsManifest: [
+ {
+ paths: [],
+ weights: weightSpecs1,
+ },
+ ],
};
const resourceId = 1;
const handler = bundleResourceIO(modelJson, resourceId);
@@ -111,10 +115,11 @@ describeWithFlags('BundleResourceIO', RN_ENVS, () => {
expect(loaded.weightData).toEqual(weightData1);
});
- it('errors on string modelJSON', async () => {
+ it("errors on string modelJSON", async () => {
const response = new Response(weightData1);
- spyOn(tf.env().platform, 'fetch')
- .and.returnValue(Promise.resolve(response));
+ spyOn(tf.env().platform, "fetch").and.returnValue(
+ Promise.resolve(response)
+ );
const modelJson = `{
modelTopology: modelTopology1,
@@ -124,100 +129,102 @@ describeWithFlags('BundleResourceIO', RN_ENVS, () => {
}]
}`;
const resourceId = 1;
- expect(
- () => bundleResourceIO(
- modelJson as unknown as tf.io.ModelJSON, resourceId))
- .toThrow(new Error(
- 'modelJson must be a JavaScript object (and not a string).\n' +
- 'Have you wrapped yor asset path in a require() statment?'));
+ expect(() =>
+ bundleResourceIO(modelJson as unknown as tf.io.ModelJSON, resourceId)
+ ).toThrow(
+ new Error(
+ "modelJson must be a JavaScript object (and not a string).\n" +
+ "Have you wrapped yor asset path in a require() statement?"
+ )
+ );
});
});
-describeWithFlags('BundleResourceIO Sharded', RN_ENVS, () => {
+describeWithFlags("BundleResourceIO Sharded", RN_ENVS, () => {
// Test data.
const modelTopology: {} = {
- 'class_name': 'Sequential',
- 'keras_version': '2.1.4',
- 'config': [
+ class_name: "Sequential",
+ keras_version: "2.1.4",
+ config: [
{
- 'class_name': 'Dense',
- 'config': {
- 'kernel_initializer': {
- 'class_name': 'VarianceScaling',
- 'config': {
- 'distribution': 'uniform',
- 'scale': 1.0,
- 'seed': null,
- 'mode': 'fan_avg'
- }
+ class_name: "Dense",
+ config: {
+ kernel_initializer: {
+ class_name: "VarianceScaling",
+ config: {
+ distribution: "uniform",
+ scale: 1.0,
+ seed: null,
+ mode: "fan_avg",
+ },
},
- 'name': 'dense',
- 'kernel_constraint': null,
- 'bias_regularizer': null,
- 'bias_constraint': null,
- 'dtype': 'float32',
- 'activation': 'linear',
- 'trainable': true,
- 'kernel_regularizer': null,
- 'bias_initializer': {'class_name': 'Zeros', 'config': {}},
- 'units': 1,
- 'batch_input_shape': [null, 3],
- 'use_bias': true,
- 'activity_regularizer': null
- }
+ name: "dense",
+ kernel_constraint: null,
+ bias_regularizer: null,
+ bias_constraint: null,
+ dtype: "float32",
+ activation: "linear",
+ trainable: true,
+ kernel_regularizer: null,
+ bias_initializer: { class_name: "Zeros", config: {} },
+ units: 1,
+ batch_input_shape: [null, 3],
+ use_bias: true,
+ activity_regularizer: null,
+ },
},
{
- 'class_name': 'Dense',
- 'config': {
- 'kernel_initializer': {
- 'class_name': 'VarianceScaling',
- 'config': {
- 'distribution': 'uniform',
- 'scale': 1.0,
- 'seed': null,
- 'mode': 'fan_avg'
- }
+ class_name: "Dense",
+ config: {
+ kernel_initializer: {
+ class_name: "VarianceScaling",
+ config: {
+ distribution: "uniform",
+ scale: 1.0,
+ seed: null,
+ mode: "fan_avg",
+ },
},
- 'name': 'dense2',
- 'kernel_constraint': null,
- 'bias_regularizer': null,
- 'bias_constraint': null,
- 'dtype': 'float32',
- 'activation': 'linear',
- 'trainable': true,
- 'kernel_regularizer': null,
- 'bias_initializer': {'class_name': 'Zeros', 'config': {}},
- 'units': 1,
- 'batch_input_shape': [null, 3],
- 'use_bias': true,
- 'activity_regularizer': null
- }
- }
+ name: "dense2",
+ kernel_constraint: null,
+ bias_regularizer: null,
+ bias_constraint: null,
+ dtype: "float32",
+ activation: "linear",
+ trainable: true,
+ kernel_regularizer: null,
+ bias_initializer: { class_name: "Zeros", config: {} },
+ units: 1,
+ batch_input_shape: [null, 3],
+ use_bias: true,
+ activity_regularizer: null,
+ },
+ },
],
- 'backend': 'tensorflow'
+ backend: "tensorflow",
};
const weightSpecs: tf.io.WeightsManifestEntry[] = [
{
- name: 'dense/kernel',
+ name: "dense/kernel",
shape: [3, 1],
- dtype: 'float32',
+ dtype: "float32",
},
{
- name: 'dense/bias',
+ name: "dense/bias",
shape: [1],
- dtype: 'float32',
+ dtype: "float32",
},
{
- name: 'dense2/kernel',
+ name: "dense2/kernel",
shape: [3, 1],
- dtype: 'float32',
+ dtype: "float32",
},
{
- name: 'dense2/bias',
+ name: "dense2/bias",
shape: [1],
- dtype: 'float32',
- }
+ dtype: "float32",
+ },
];
const weightData1 = new ArrayBuffer(16);
const weightData2 = new ArrayBuffer(16);
@@ -225,35 +232,36 @@ describeWithFlags('BundleResourceIO Sharded', RN_ENVS, () => {
const combinedWeightsExpected = new ArrayBuffer(32);
- it('constructs an IOHandler', async () => {
+ it("constructs an IOHandler", async () => {
const modelJson: tf.io.ModelJSON = {
modelTopology,
- weightsManifest: [{
- paths: [],
- weights: weightSpecs,
- }]
-
+ weightsManifest: [
+ {
+ paths: [],
+ weights: weightSpecs,
+ },
+ ],
};
const handler = bundleResourceIO(modelJson, resourceIds);
- expect(typeof handler.load).toBe('function');
- expect(typeof handler.save).toBe('function');
+ expect(typeof handler.load).toBe("function");
+ expect(typeof handler.save).toBe("function");
});
- it('loads model artifacts', async () => {
- spyOn(tf.env().platform, 'fetch')
- .and.returnValues(
- Promise.resolve(new Response(weightData1)),
- Promise.resolve(new Response(weightData2)),
- );
+ it("loads model artifacts", async () => {
+ spyOn(tf.env().platform, "fetch").and.returnValues(
+ Promise.resolve(new Response(weightData1)),
+ Promise.resolve(new Response(weightData2))
+ );
const modelJson: tf.io.ModelJSON = {
modelTopology,
- weightsManifest: [{
- paths: [],
- weights: weightSpecs,
- }]
-
+ weightsManifest: [
+ {
+ paths: [],
+ weights: weightSpecs,
+ },
+ ],
};
const handler = bundleResourceIO(modelJson, resourceIds);
diff --git a/tfjs-react-native/src/camera/camera_stream.tsx b/tfjs-react-native/src/camera/camera_stream.tsx
index cf425d4805e..5eebe8498eb 100644
--- a/tfjs-react-native/src/camera/camera_stream.tsx
+++ b/tfjs-react-native/src/camera/camera_stream.tsx
@@ -110,7 +110,7 @@ const DEFAULT_USE_CUSTOM_SHADERS_TO_RESIZE = false;
* gl: ExpoWebGLRenderingContext,
* cameraTexture: WebGLTexture
* ) => void — When the component is mounted and ready this callback will
- * be called and recieve the following 3 elements:
+ * be called and receive the following 3 elements:
* - __images__ is an [iterator](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Iterators_and_Generators)
* that yields tensors representing the camera image on demand.
* - __updateCameraPreview__ is a function that will update the WebGL render
@@ -228,7 +228,7 @@ export function cameraWithTensors(
}
/**
- * Callback for GL context creation. We do mose of the work of setting
+ * Callback for GL context creation. We do most of the work of setting
* up the component here.
* @param gl
*/
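For orientation, here is a minimal usage sketch of the higher-order component documented above. The prop names follow the tfjs-react-native README; the resize values and render loop are illustrative assumptions, not part of the patch:

```ts
// Hedged sketch: assumes expo-camera and @tensorflow/tfjs-react-native.
import {Camera} from 'expo-camera';
import {cameraWithTensors} from '@tensorflow/tfjs-react-native';

const TensorCamera = cameraWithTensors(Camera);

// Rendered inside a component (JSX kept in a comment for brevity):
// <TensorCamera
//   type={Camera.Constants.Type.back}
//   resizeHeight={200} resizeWidth={152} resizeDepth={3}
//   autorender={true}
//   onReady={(images, updatePreview, gl) => {
//     const loop = async () => {
//       const nextImage = images.next().value;  // tf.Tensor3D, on demand
//       // ...run inference here, then dispose nextImage...
//       requestAnimationFrame(loop);
//     };
//     loop();
//   }}
// />
```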
diff --git a/tfjs-react-native/src/camera/camera_test.ts b/tfjs-react-native/src/camera/camera_test.ts
index 45c8b95b8db..010740e8603 100644
--- a/tfjs-react-native/src/camera/camera_test.ts
+++ b/tfjs-react-native/src/camera/camera_test.ts
@@ -15,15 +15,15 @@
* =============================================================================
*/
-import * as tf from '@tensorflow/tfjs-core';
-import {test_util} from '@tensorflow/tfjs-core';
+import * as tf from "@tensorflow/tfjs-core";
+import { test_util } from "@tensorflow/tfjs-core";
// tslint:disable-next-line: no-imports-from-dist
-import {describeWithFlags} from '@tensorflow/tfjs-core/dist/jasmine_util';
-import {ExpoWebGLRenderingContext, GLView} from 'expo-gl';
+import { describeWithFlags } from "@tensorflow/tfjs-core/dist/jasmine_util";
+import { ExpoWebGLRenderingContext, GLView } from "expo-gl";
-import {RN_ENVS} from '../test_env_registry';
+import { RN_ENVS } from "../test_env_registry";
-import {detectGLCapabilities, fromTexture, toTexture} from './camera';
+import { detectGLCapabilities, fromTexture, toTexture } from "./camera";
async function createGLContext(): Promise<ExpoWebGLRenderingContext> {
return GLView.createContextAsync();
@@ -33,20 +33,24 @@ const expectArraysEqual = test_util.expectArraysEqual;
let gl: ExpoWebGLRenderingContext;
-describeWithFlags('toTexture', RN_ENVS, () => {
+describeWithFlags("toTexture", RN_ENVS, () => {
beforeAll(async () => {
if (gl == null) {
gl = await createGLContext();
}
});
- it('should not throw', async () => {
+ it("should not throw", async () => {
const height = 2;
const width = 2;
const depth = 4;
- const inTensor: tf.Tensor3D =
- tf.truncatedNormal([height, width, depth], 127, 40, 'int32');
+ const inTensor: tf.Tensor3D = tf.truncatedNormal(
+ [height, width, depth],
+ 127,
+ 40,
+ "int32"
+ );
let texture: WebGLTexture;
expect(async () => {
@@ -56,57 +60,82 @@ describeWithFlags('toTexture', RN_ENVS, () => {
expect(texture instanceof WebGLTexture);
});
- it('should roundtrip succesfully', async () => {
+ it("should roundtrip successfully", async () => {
const height = 2;
const width = 2;
const depth = 4;
- const inTensor: tf.Tensor3D =
- tf.truncatedNormal([height, width, depth], 127, 40, 'int32');
+ const inTensor: tf.Tensor3D = tf.truncatedNormal(
+ [height, width, depth],
+ 127,
+ 40,
+ "int32"
+ );
const texture = await toTexture(gl, inTensor);
const outTensor = fromTexture(
- gl, texture, {width, height, depth}, {width, height, depth}, true);
+ gl,
+ texture,
+ { width, height, depth },
+ { width, height, depth },
+ true
+ );
expectArraysEqual(await inTensor.data(), await outTensor.data());
expectArraysEqual(inTensor.shape, outTensor.shape);
});
- it('throws if tensor is not int32 dtype', async () => {
+ it("throws if tensor is not int32 dtype", async () => {
const height = 2;
const width = 2;
const depth = 4;
- const floatInput: tf.Tensor3D =
- tf.truncatedNormal([height, width, depth], 127, 40, 'float32');
+ const floatInput: tf.Tensor3D = tf.truncatedNormal(
+ [height, width, depth],
+ 127,
+ 40,
+ "float32"
+ );
expectAsync(toTexture(gl, floatInput)).toBeRejected();
});
- it('throws if tensor is not a tensor3d dtype', async () => {
+ it("throws if tensor is not a tensor3d dtype", async () => {
const batch = 2;
const height = 2;
const width = 2;
const depth = 4;
- const oneDInput: tf.Tensor1D =
- tf.truncatedNormal([height], 127, 40, 'int32');
+ const oneDInput: tf.Tensor1D = tf.truncatedNormal(
+ [height],
+ 127,
+ 40,
+ "int32"
+ );
//@ts-ignore
expectAsync(toTexture(gl, oneDInput)).toBeRejected();
- const twoDInput: tf.Tensor2D =
- tf.truncatedNormal([height, width], 127, 40, 'int32');
+ const twoDInput: tf.Tensor2D = tf.truncatedNormal(
+ [height, width],
+ 127,
+ 40,
+ "int32"
+ );
//@ts-ignore
expectAsync(toTexture(gl, twoDInput)).toBeRejected();
- const fourDInput: tf.Tensor4D =
- tf.truncatedNormal([batch, height, width, depth], 127, 40, 'int32');
+ const fourDInput: tf.Tensor4D = tf.truncatedNormal(
+ [batch, height, width, depth],
+ 127,
+ 40,
+ "int32"
+ );
//@ts-ignore
expectAsync(toTexture(gl, fourDInput)).toBeRejected();
});
});
-describeWithFlags('fromTexture:nearestNeighbor', RN_ENVS, () => {
+describeWithFlags("fromTexture:nearestNeighbor", RN_ENVS, () => {
let texture: WebGLTexture;
let input: tf.Tensor3D;
const inShape: [number, number, number] = [4, 4, 4];
@@ -117,33 +146,35 @@ describeWithFlags('fromTexture:nearestNeighbor', RN_ENVS, () => {
}
input = tf.tensor3d(
+ [
+ [
+ [200, 201, 202, 255],
+ [190, 191, 192, 255],
+ [180, 181, 182, 255],
+ [170, 171, 172, 255],
+ ],
+ [
+ [160, 161, 162, 255],
+ [150, 151, 152, 255],
+ [140, 141, 142, 255],
+ [130, 131, 132, 255],
+ ],
[
- [
- [200, 201, 202, 255],
- [190, 191, 192, 255],
- [180, 181, 182, 255],
- [170, 171, 172, 255],
- ],
- [
- [160, 161, 162, 255],
- [150, 151, 152, 255],
- [140, 141, 142, 255],
- [130, 131, 132, 255],
- ],
- [
- [120, 121, 122, 255],
- [110, 111, 112, 255],
- [100, 101, 102, 255],
- [90, 91, 92, 255],
- ],
- [
- [80, 81, 82, 255],
- [70, 71, 72, 255],
- [60, 61, 62, 255],
- [50, 51, 52, 255],
- ]
- ],
- inShape, 'int32');
+ [120, 121, 122, 255],
+ [110, 111, 112, 255],
+ [100, 101, 102, 255],
+ [90, 91, 92, 255],
+ ],
+ [
+ [80, 81, 82, 255],
+ [70, 71, 72, 255],
+ [60, 61, 62, 255],
+ [50, 51, 52, 255],
+ ],
+ ],
+ inShape,
+ "int32"
+ );
});
beforeEach(async () => {
@@ -154,263 +185,271 @@ describeWithFlags('fromTexture:nearestNeighbor', RN_ENVS, () => {
tf.dispose(input);
});
- it('same size alignCorners=false', async () => {
+ it("same size alignCorners=false", async () => {
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- true,
- {
- alignCorners: false,
- interpolation: 'nearest_neighbor',
- },
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ true,
+ {
+ alignCorners: false,
+ interpolation: "nearest_neighbor",
+ }
);
expectArraysEqual(await output.data(), await input.data());
expectArraysEqual(output.shape, input.shape);
});
- it('same size, alignCorners=true', async () => {
+ it("same size, alignCorners=true", async () => {
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- true,
- {
- alignCorners: true,
- interpolation: 'nearest_neighbor',
- },
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ true,
+ {
+ alignCorners: true,
+ interpolation: "nearest_neighbor",
+ }
);
expectArraysEqual(await output.data(), await input.data());
expectArraysEqual(output.shape, input.shape);
});
- it('smaller, resizeNearestNeighbor, same aspect ratio, alignCorners=false',
- async () => {
- const expectedShape: [number, number, number] = [2, 2, 4];
- const expected = tf.tensor3d(
- [
- [
- [200, 201, 202, 255],
- [180, 181, 182, 255],
- ],
- [
- [120, 121, 122, 255],
- [100, 101, 102, 255],
- ]
- ],
- expectedShape, 'int32');
-
- const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: expectedShape[0],
- width: expectedShape[1],
- depth: expectedShape[2],
- },
- true,
- {alignCorners: false, interpolation: 'nearest_neighbor'},
- );
-
- expectArraysEqual(await output.data(), await expected.data());
- expectArraysEqual(output.shape, expected.shape);
- });
-
- it('smaller, resizeNearestNeighbor, same aspect ratio, alignCorners=true',
- async () => {
- const expectedShape: [number, number, number] = [2, 2, 4];
- const expected = tf.tensor3d(
- [
- [
- [200, 201, 202, 255],
- [170, 171, 172, 255],
- ],
- [
- [80, 81, 82, 255],
- [50, 51, 52, 255],
- ]
- ],
- expectedShape, 'int32');
-
- const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: expectedShape[0],
- width: expectedShape[1],
- depth: expectedShape[2],
- },
- true,
- {alignCorners: true, interpolation: 'nearest_neighbor'},
- );
-
- expectArraysEqual(await output.data(), await expected.data());
- expectArraysEqual(output.shape, expected.shape);
- });
-
- it('smaller, resizeNearestNeighbor, wider, alignCorners=false', async () => {
+ it("smaller, resizeNearestNeighbor, same aspect ratio, alignCorners=false", async () => {
+ const expectedShape: [number, number, number] = [2, 2, 4];
+ const expected = tf.tensor3d(
+ [
+ [
+ [200, 201, 202, 255],
+ [180, 181, 182, 255],
+ ],
+ [
+ [120, 121, 122, 255],
+ [100, 101, 102, 255],
+ ],
+ ],
+ expectedShape,
+ "int32"
+ );
+
+ const output = fromTexture(
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: expectedShape[0],
+ width: expectedShape[1],
+ depth: expectedShape[2],
+ },
+ true,
+ { alignCorners: false, interpolation: "nearest_neighbor" }
+ );
+
+ expectArraysEqual(await output.data(), await expected.data());
+ expectArraysEqual(output.shape, expected.shape);
+ });
+
+ it("smaller, resizeNearestNeighbor, same aspect ratio, alignCorners=true", async () => {
+ const expectedShape: [number, number, number] = [2, 2, 4];
+ const expected = tf.tensor3d(
+ [
+ [
+ [200, 201, 202, 255],
+ [170, 171, 172, 255],
+ ],
+ [
+ [80, 81, 82, 255],
+ [50, 51, 52, 255],
+ ],
+ ],
+ expectedShape,
+ "int32"
+ );
+
+ const output = fromTexture(
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: expectedShape[0],
+ width: expectedShape[1],
+ depth: expectedShape[2],
+ },
+ true,
+ { alignCorners: true, interpolation: "nearest_neighbor" }
+ );
+
+ expectArraysEqual(await output.data(), await expected.data());
+ expectArraysEqual(output.shape, expected.shape);
+ });
+
+ it("smaller, resizeNearestNeighbor, wider, alignCorners=false", async () => {
const expectedShape: [number, number, number] = [2, 3, 4];
const expected = tf.tensor3d(
+ [
+ [
+ [200, 201, 202, 255],
+ [190, 191, 192, 255],
+ [180, 181, 182, 255],
+ ],
[
- [
- [200, 201, 202, 255],
- [190, 191, 192, 255],
- [180, 181, 182, 255],
- ],
- [
- [120, 121, 122, 255],
- [110, 111, 112, 255],
- [100, 101, 102, 255],
- ]
+ [120, 121, 122, 255],
+ [110, 111, 112, 255],
+ [100, 101, 102, 255],
],
- expectedShape, 'int32');
+ ],
+ expectedShape,
+ "int32"
+ );
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: expectedShape[0],
- width: expectedShape[1],
- depth: expectedShape[2],
- },
- true,
- {alignCorners: false, interpolation: 'nearest_neighbor'},
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: expectedShape[0],
+ width: expectedShape[1],
+ depth: expectedShape[2],
+ },
+ true,
+ { alignCorners: false, interpolation: "nearest_neighbor" }
);
expectArraysEqual(await output.data(), await expected.data());
expectArraysEqual(output.shape, expected.shape);
});
- it('smaller, resizeNearestNeighbor, wider, alignCorners=true', async () => {
+ it("smaller, resizeNearestNeighbor, wider, alignCorners=true", async () => {
const expectedShape: [number, number, number] = [2, 3, 4];
const expected = tf.tensor3d(
+ [
[
- [
- [200, 201, 202, 255],
- [180, 181, 182, 255],
- [170, 171, 172, 255],
- ],
+ [200, 201, 202, 255],
+ [180, 181, 182, 255],
+ [170, 171, 172, 255],
+ ],
- [
- [80, 81, 82, 255],
- [60, 61, 62, 255],
- [50, 51, 52, 255],
- ]
+ [
+ [80, 81, 82, 255],
+ [60, 61, 62, 255],
+ [50, 51, 52, 255],
],
- expectedShape, 'int32');
+ ],
+ expectedShape,
+ "int32"
+ );
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: expectedShape[0],
- width: expectedShape[1],
- depth: expectedShape[2],
- },
- true,
- {alignCorners: true, interpolation: 'nearest_neighbor'},
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: expectedShape[0],
+ width: expectedShape[1],
+ depth: expectedShape[2],
+ },
+ true,
+ { alignCorners: true, interpolation: "nearest_neighbor" }
);
expectArraysEqual(await output.data(), await expected.data());
expectArraysEqual(output.shape, expected.shape);
});
- it('same size, should drop alpha channel', async () => {
+ it("same size, should drop alpha channel", async () => {
await detectGLCapabilities(gl);
const expected = tf.tensor3d(
+ [
+ [
+ [200, 201, 202],
+ [190, 191, 192],
+ [180, 181, 182],
+ [170, 171, 172],
+ ],
+ [
+ [160, 161, 162],
+ [150, 151, 152],
+ [140, 141, 142],
+ [130, 131, 132],
+ ],
[
- [
- [200, 201, 202],
- [190, 191, 192],
- [180, 181, 182],
- [170, 171, 172],
- ],
- [
- [160, 161, 162],
- [150, 151, 152],
- [140, 141, 142],
- [130, 131, 132],
- ],
- [
- [120, 121, 122],
- [110, 111, 112],
- [100, 101, 102],
- [90, 91, 92],
- ],
- [
- [80, 81, 82],
- [70, 71, 72],
- [60, 61, 62],
- [50, 51, 52],
- ]
- ],
- [inShape[0], inShape[1], 3], 'int32');
+ [120, 121, 122],
+ [110, 111, 112],
+ [100, 101, 102],
+ [90, 91, 92],
+ ],
+ [
+ [80, 81, 82],
+ [70, 71, 72],
+ [60, 61, 62],
+ [50, 51, 52],
+ ],
+ ],
+ [inShape[0], inShape[1], 3],
+ "int32"
+ );
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: inShape[0],
- width: inShape[1],
- depth: 3,
- },
- true,
- {
- alignCorners: true,
- interpolation: 'nearest_neighbor',
- },
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: 3,
+ },
+ true,
+ {
+ alignCorners: true,
+ interpolation: "nearest_neighbor",
+ }
);
expectArraysEqual(await output.data(), await expected.data());
expectArraysEqual(output.shape, expected.shape);
});
});
-describeWithFlags('fromTexture:bilinear', RN_ENVS, () => {
+describeWithFlags("fromTexture:bilinear", RN_ENVS, () => {
let texture: WebGLTexture;
let input: tf.Tensor3D;
const inShape: [number, number, number] = [4, 4, 4];
@@ -421,33 +460,35 @@ describeWithFlags('fromTexture:bilinear', RN_ENVS, () => {
}
input = tf.tensor3d(
+ [
+ [
+ [200, 201, 202, 255],
+ [190, 191, 192, 255],
+ [180, 181, 182, 255],
+ [170, 171, 172, 255],
+ ],
[
- [
- [200, 201, 202, 255],
- [190, 191, 192, 255],
- [180, 181, 182, 255],
- [170, 171, 172, 255],
- ],
- [
- [160, 161, 162, 255],
- [150, 151, 152, 255],
- [140, 141, 142, 255],
- [130, 131, 132, 255],
- ],
- [
- [120, 121, 122, 255],
- [110, 111, 112, 255],
- [100, 101, 102, 255],
- [90, 91, 92, 255],
- ],
- [
- [80, 81, 82, 255],
- [70, 71, 72, 255],
- [60, 61, 62, 255],
- [50, 51, 52, 255],
- ]
- ],
- inShape, 'int32');
+ [160, 161, 162, 255],
+ [150, 151, 152, 255],
+ [140, 141, 142, 255],
+ [130, 131, 132, 255],
+ ],
+ [
+ [120, 121, 122, 255],
+ [110, 111, 112, 255],
+ [100, 101, 102, 255],
+ [90, 91, 92, 255],
+ ],
+ [
+ [80, 81, 82, 255],
+ [70, 71, 72, 255],
+ [60, 61, 62, 255],
+ [50, 51, 52, 255],
+ ],
+ ],
+ inShape,
+ "int32"
+ );
});
afterAll(() => {
@@ -458,253 +499,263 @@ describeWithFlags('fromTexture:bilinear', RN_ENVS, () => {
texture = await toTexture(gl, input);
});
- it('same size alignCorners=false', async () => {
+ it("same size alignCorners=false", async () => {
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- true,
- {
- alignCorners: false,
- interpolation: 'bilinear',
- },
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ true,
+ {
+ alignCorners: false,
+ interpolation: "bilinear",
+ }
);
expectArraysEqual(await output.data(), await input.data());
expectArraysEqual(output.shape, input.shape);
});
- it('same size, alignCorners=true', async () => {
+ it("same size, alignCorners=true", async () => {
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- true,
- {
- alignCorners: true,
- interpolation: 'bilinear',
- },
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ true,
+ {
+ alignCorners: true,
+ interpolation: "bilinear",
+ }
);
expectArraysEqual(await output.data(), await input.data());
expectArraysEqual(output.shape, input.shape);
});
- it('smaller, same aspect ratio, alignCorners=false', async () => {
+ it("smaller, same aspect ratio, alignCorners=false", async () => {
const expectedShape: [number, number, number] = [2, 2, 4];
const expected = tf.tensor3d(
+ [
+ [
+ [200, 201, 202, 255],
+ [180, 181, 182, 255],
+ ],
[
- [
- [200, 201, 202, 255],
- [180, 181, 182, 255],
- ],
- [
- [120, 121, 122, 255],
- [100, 101, 102, 255],
- ]
+ [120, 121, 122, 255],
+ [100, 101, 102, 255],
],
- expectedShape, 'int32');
+ ],
+ expectedShape,
+ "int32"
+ );
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: expectedShape[0],
- width: expectedShape[1],
- depth: expectedShape[2],
- },
- true,
- {alignCorners: false, interpolation: 'bilinear'},
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: expectedShape[0],
+ width: expectedShape[1],
+ depth: expectedShape[2],
+ },
+ true,
+ { alignCorners: false, interpolation: "bilinear" }
);
expectArraysEqual(await output.data(), await expected.data());
expectArraysEqual(output.shape, expected.shape);
});
- it('smaller, same aspect ratio, alignCorners=true', async () => {
+ it("smaller, same aspect ratio, alignCorners=true", async () => {
const expectedShape: [number, number, number] = [2, 2, 4];
const expected = tf.tensor3d(
+ [
+ [
+ [200, 201, 202, 255],
+ [170, 171, 172, 255],
+ ],
[
- [
- [200, 201, 202, 255],
- [170, 171, 172, 255],
- ],
- [
- [80, 81, 82, 255],
- [50, 51, 52, 255],
- ]
+ [80, 81, 82, 255],
+ [50, 51, 52, 255],
],
- expectedShape, 'int32');
+ ],
+ expectedShape,
+ "int32"
+ );
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: expectedShape[0],
- width: expectedShape[1],
- depth: expectedShape[2],
- },
- true,
- {alignCorners: true, interpolation: 'bilinear'},
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: expectedShape[0],
+ width: expectedShape[1],
+ depth: expectedShape[2],
+ },
+ true,
+ { alignCorners: true, interpolation: "bilinear" }
);
expectArraysEqual(await output.data(), await expected.data());
expectArraysEqual(output.shape, expected.shape);
});
- it('smaller, wider, alignCorners=false', async () => {
+ it("smaller, wider, alignCorners=false", async () => {
const expectedShape: [number, number, number] = [2, 3, 4];
const expected = tf.tensor3d(
+ [
+ [
+ [200, 201, 202, 255],
+ [187, 188, 189, 255],
+ [173, 174, 175, 255],
+ ],
[
- [
- [200, 201, 202, 255],
- [187, 188, 189, 255],
- [173, 174, 175, 255],
- ],
- [
- [120, 121, 122, 255],
- [107, 108, 109, 255],
- [93, 94, 95, 255],
- ]
+ [120, 121, 122, 255],
+ [107, 108, 109, 255],
+ [93, 94, 95, 255],
],
- expectedShape, 'int32');
+ ],
+ expectedShape,
+ "int32"
+ );
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: expectedShape[0],
- width: expectedShape[1],
- depth: expectedShape[2],
- },
- true,
- {alignCorners: false, interpolation: 'bilinear'},
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: expectedShape[0],
+ width: expectedShape[1],
+ depth: expectedShape[2],
+ },
+ true,
+ { alignCorners: false, interpolation: "bilinear" }
);
expectArraysEqual(await output.data(), await expected.data());
expectArraysEqual(output.shape, expected.shape);
});
- it('smaller, wider, alignCorners=true', async () => {
+ it("smaller, wider, alignCorners=true", async () => {
const expectedShape: [number, number, number] = [2, 3, 4];
const expected = tf.tensor3d(
+ [
[
- [
- [200, 201, 202, 255],
- [185, 186, 187, 255],
- [170, 171, 172, 255],
- ],
- [
- [80, 81, 82, 255],
- [65, 66, 67, 255],
- [50, 51, 52, 255],
- ]
+ [200, 201, 202, 255],
+ [185, 186, 187, 255],
+ [170, 171, 172, 255],
],
- expectedShape, 'int32');
+ [
+ [80, 81, 82, 255],
+ [65, 66, 67, 255],
+ [50, 51, 52, 255],
+ ],
+ ],
+ expectedShape,
+ "int32"
+ );
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: expectedShape[0],
- width: expectedShape[1],
- depth: expectedShape[2],
- },
- true,
- {alignCorners: true, interpolation: 'bilinear'},
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: expectedShape[0],
+ width: expectedShape[1],
+ depth: expectedShape[2],
+ },
+ true,
+ { alignCorners: true, interpolation: "bilinear" }
);
expectArraysEqual(await output.data(), await expected.data());
expectArraysEqual(output.shape, expected.shape);
});
- it('same size, should drop alpha channel', async () => {
+ it("same size, should drop alpha channel", async () => {
await detectGLCapabilities(gl);
const expected = tf.tensor3d(
+ [
+ [
+ [200, 201, 202],
+ [190, 191, 192],
+ [180, 181, 182],
+ [170, 171, 172],
+ ],
+ [
+ [160, 161, 162],
+ [150, 151, 152],
+ [140, 141, 142],
+ [130, 131, 132],
+ ],
+ [
+ [120, 121, 122],
+ [110, 111, 112],
+ [100, 101, 102],
+ [90, 91, 92],
+ ],
[
- [
- [200, 201, 202],
- [190, 191, 192],
- [180, 181, 182],
- [170, 171, 172],
- ],
- [
- [160, 161, 162],
- [150, 151, 152],
- [140, 141, 142],
- [130, 131, 132],
- ],
- [
- [120, 121, 122],
- [110, 111, 112],
- [100, 101, 102],
- [90, 91, 92],
- ],
- [
- [80, 81, 82],
- [70, 71, 72],
- [60, 61, 62],
- [50, 51, 52],
- ]
- ],
- [inShape[0], inShape[1], 3], 'int32');
+ [80, 81, 82],
+ [70, 71, 72],
+ [60, 61, 62],
+ [50, 51, 52],
+ ],
+ ],
+ [inShape[0], inShape[1], 3],
+ "int32"
+ );
const output = fromTexture(
- gl,
- texture,
- {
- height: inShape[0],
- width: inShape[1],
- depth: inShape[2],
- },
- {
- height: inShape[0],
- width: inShape[1],
- depth: 3,
- },
- true,
- {
- alignCorners: true,
- interpolation: 'bilinear',
- },
+ gl,
+ texture,
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: inShape[2],
+ },
+ {
+ height: inShape[0],
+ width: inShape[1],
+ depth: 3,
+ },
+ true,
+ {
+ alignCorners: true,
+ interpolation: "bilinear",
+ }
);
expectArraysEqual(await output.data(), await expected.data());
expectArraysEqual(output.shape, expected.shape);
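Summarizing the API these tests exercise, a hedged round-trip sketch; the trailing boolean mirrors the positional flag the tests pass and is an assumption here:

```ts
import * as tf from '@tensorflow/tfjs-core';
import {ExpoWebGLRenderingContext} from 'expo-gl';
import {toTexture, fromTexture} from '@tensorflow/tfjs-react-native';

async function roundTrip(
  gl: ExpoWebGLRenderingContext,
  img: tf.Tensor3D  // must be int32 with shape [height, width, 4]
): Promise<tf.Tensor3D> {
  const texture = await toTexture(gl, img);
  const [height, width, depth] = img.shape;
  return fromTexture(
    gl,
    texture,
    {height, width, depth},  // source texture dims
    {height, width, depth},  // target dims; shrink these or set depth: 3
                             // to resize or drop the alpha channel
    true                     // same positional flag the tests above use
  );
}
```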
diff --git a/tfjs-react-native/src/platform_react_native.ts b/tfjs-react-native/src/platform_react_native.ts
index 8537981d61a..fa57fa6afb6 100644
--- a/tfjs-react-native/src/platform_react_native.ts
+++ b/tfjs-react-native/src/platform_react_native.ts
@@ -15,27 +15,31 @@
* =============================================================================
*/
-import '@tensorflow/tfjs-backend-cpu';
-import {GPGPUContext, MathBackendWebGL, setWebGLContext} from '@tensorflow/tfjs-backend-webgl';
-import * as tf from '@tensorflow/tfjs-core';
-import {Platform} from '@tensorflow/tfjs-core';
-import {Buffer} from 'buffer';
-import {GLView} from 'expo-gl';
-import {Platform as RNPlatform} from 'react-native';
+import "@tensorflow/tfjs-backend-cpu";
+import {
+ GPGPUContext,
+ MathBackendWebGL,
+ setWebGLContext,
+} from "@tensorflow/tfjs-backend-webgl";
+import * as tf from "@tensorflow/tfjs-core";
+import { Platform } from "@tensorflow/tfjs-core";
+import { Buffer } from "buffer";
+import { GLView } from "expo-gl";
+import { Platform as RNPlatform } from "react-native";
-// See implemetation note on fetch
+// See implementation note on fetch
// tslint:disable-next-line:max-line-length
// https://github.com/facebook/react-native/blob/0ee5f68929610106ee6864baa04ea90be0fc5160/Libraries/vendor/core/whatwg-fetch.js#L421
function parseHeaders(rawHeaders: string) {
const headers = new Headers();
// Replace instances of \r\n and \n followed by at least one space or
// horizontal tab with a space https://tools.ietf.org/html/rfc7230#section-3.2
- const preProcessedHeaders = rawHeaders.replace(/\r?\n[\t ]+/g, ' ');
- preProcessedHeaders.split(/\r?\n/).forEach(line => {
- const parts = line.split(':');
+ const preProcessedHeaders = rawHeaders.replace(/\r?\n[\t ]+/g, " ");
+ preProcessedHeaders.split(/\r?\n/).forEach((line) => {
+ const parts = line.split(":");
const key = parts.shift().trim();
if (key) {
- const value = parts.join(':').trim();
+ const value = parts.join(":").trim();
headers.append(key, value);
}
});
@@ -67,8 +71,10 @@ function parseHeaders(rawHeaders: string) {
* @doc {heading: 'Platform helpers', subheading: 'http'}
*/
export async function fetch(
- path: string, init?: RequestInit,
- options?: tf.io.RequestDetails): Promise<Response> {
+ path: string,
+ init?: RequestInit,
+ options?: tf.io.RequestDetails
+): Promise<Response> {
return new Promise((resolve, reject) => {
const request = new Request(path, init);
const xhr = new XMLHttpRequest();
@@ -77,27 +83,28 @@ export async function fetch(
const reqOptions = {
status: xhr.status,
statusText: xhr.statusText,
- headers: parseHeaders(xhr.getAllResponseHeaders() || ''),
- url: '',
+ headers: parseHeaders(xhr.getAllResponseHeaders() || ""),
+ url: "",
};
- reqOptions.url = 'responseURL' in xhr ?
- xhr.responseURL :
- reqOptions.headers.get('X-Request-URL');
+ reqOptions.url =
+ "responseURL" in xhr
+ ? xhr.responseURL
+ : reqOptions.headers.get("X-Request-URL");
- //@ts-ignore — ts belives the latter case will never occur.
- const body = 'response' in xhr ? xhr.response : xhr.responseText;
+ //@ts-ignore — ts believes the latter case will never occur.
+ const body = "response" in xhr ? xhr.response : xhr.responseText;
resolve(new Response(body, reqOptions));
};
- xhr.onerror = () => reject(new TypeError('Network request failed'));
- xhr.ontimeout = () => reject(new TypeError('Network request failed'));
+ xhr.onerror = () => reject(new TypeError("Network request failed"));
+ xhr.ontimeout = () => reject(new TypeError("Network request failed"));
xhr.open(request.method, request.url, true);
- if (request.credentials === 'include') {
+ if (request.credentials === "include") {
xhr.withCredentials = true;
- } else if (request.credentials === 'omit') {
+ } else if (request.credentials === "omit") {
xhr.withCredentials = false;
}
@@ -105,7 +112,7 @@ export async function fetch(
// In react native We need to set the response type to arraybuffer when
// fetching binary resources in order for `.arrayBuffer` to work correctly
// on the response.
- xhr.responseType = 'arraybuffer';
+ xhr.responseType = "arraybuffer";
}
request.headers.forEach((value: string, name: string) => {
@@ -113,8 +120,8 @@ export async function fetch(
});
xhr.send(
- //@ts-ignore
- typeof request._bodyInit === 'undefined' ? null : request._bodyInit,
+ //@ts-ignore
+ typeof request._bodyInit === "undefined" ? null : request._bodyInit
);
});
}
@@ -126,7 +133,10 @@ export class PlatformReactNative implements Platform {
* see @fetch docs above.
*/
async fetch(
- path: string, init?: RequestInit, options?: tf.io.RequestDetails) {
+ path: string,
+ init?: RequestInit,
+ options?: tf.io.RequestDetails
+ ) {
return fetch(path, init, options);
}
@@ -136,16 +146,16 @@ export class PlatformReactNative implements Platform {
*/
encode(text: string, encoding: string): Uint8Array {
// See https://www.w3.org/TR/encoding/#utf-16le
- if (encoding === 'utf-16') {
- encoding = 'utf16le';
+ if (encoding === "utf-16") {
+ encoding = "utf16le";
}
return new Uint8Array(Buffer.from(text, encoding as BufferEncoding));
}
/** Decode the provided bytes into a string using the provided encoding. */
decode(bytes: Uint8Array, encoding: string): string {
// See https://www.w3.org/TR/encoding/#utf-16le
- if (encoding === 'utf-16') {
- encoding = 'utf16le';
+ if (encoding === "utf-16") {
+ encoding = "utf16le";
}
return Buffer.from(bytes).toString(encoding as BufferEncoding);
}
@@ -160,13 +170,18 @@ export class PlatformReactNative implements Platform {
}
setTimeoutCustom() {
- throw new Error('react native does not support setTimeoutCustom');
+ throw new Error("react native does not support setTimeoutCustom");
}
- isTypedArray(a: unknown): a is Uint8Array | Float32Array | Int32Array
- | Uint8ClampedArray {
- return a instanceof Float32Array || a instanceof Int32Array ||
- a instanceof Uint8Array || a instanceof Uint8ClampedArray;
+ isTypedArray(
+ a: unknown
+ ): a is Uint8Array | Float32Array | Int32Array | Uint8ClampedArray {
+ return (
+ a instanceof Float32Array ||
+ a instanceof Int32Array ||
+ a instanceof Uint8Array ||
+ a instanceof Uint8ClampedArray
+ );
}
}
@@ -177,89 +192,96 @@ function setupGlobals() {
function registerWebGLBackend() {
try {
const PRIORITY = 5;
- tf.registerBackend('rn-webgl', async () => {
- const glContext = await GLView.createContextAsync();
+ tf.registerBackend(
+ "rn-webgl",
+ async () => {
+ const glContext = await GLView.createContextAsync();
- // ExpoGl getBufferSubData is not implemented yet (throws an exception).
- tf.env().set('WEBGL_BUFFER_SUPPORTED', false);
+ // ExpoGl getBufferSubData is not implemented yet (throws an exception).
+ tf.env().set("WEBGL_BUFFER_SUPPORTED", false);
- //
- // Mock extension support for EXT_color_buffer_float and
- // EXT_color_buffer_half_float on the expo-gl context object.
- // In react native we do not have to get a handle to the extension
- // in order to use the functionality of that extension on the device.
- //
- // This code block makes iOS and Android devices pass the extension checks
- // used in core. After those are done core will actually test whether
- // we can render/download float or half float textures.
- //
- // We can remove this block once we upstream checking for these
- // extensions in expo.
- //
- // TODO look into adding support for checking these extensions in expo-gl
- //
- //@ts-ignore
- const getExt = glContext.getExtension.bind(glContext);
- const shimGetExt = (name: string) => {
- if (name === 'EXT_color_buffer_float') {
- if (RNPlatform.OS === 'ios') {
- // iOS does not support EXT_color_buffer_float
- return null;
- } else {
+ //
+ // Mock extension support for EXT_color_buffer_float and
+ // EXT_color_buffer_half_float on the expo-gl context object.
+ // In react native we do not have to get a handle to the extension
+ // in order to use the functionality of that extension on the device.
+ //
+ // This code block makes iOS and Android devices pass the extension checks
+ // used in core. After those are done core will actually test whether
+ // we can render/download float or half float textures.
+ //
+ // We can remove this block once we upstream checking for these
+ // extensions in expo.
+ //
+ // TODO look into adding support for checking these extensions in expo-gl
+ //
+ //@ts-ignore
+ const getExt = glContext.getExtension.bind(glContext);
+ const shimGetExt = (name: string) => {
+ if (name === "EXT_color_buffer_float") {
+ if (RNPlatform.OS === "ios") {
+ // iOS does not support EXT_color_buffer_float
+ return null;
+ } else {
+ return {};
+ }
+ }
+
+ if (name === "EXT_color_buffer_half_float") {
return {};
}
- }
+ return getExt(name);
+ };
- if (name === 'EXT_color_buffer_half_float') {
+ //
+ // Manually make 'read' synchronous. glContext has a defined gl.fenceSync
+ // function that throws a "Not implemented yet" exception so core
+ // cannot properly detect that it is not supported. We mock
+ // implementations of gl.fenceSync and gl.clientWaitSync
+ // TODO remove once fenceSync and clientWaitSync is implemented upstream.
+ //
+ const shimFenceSync = () => {
return {};
- }
- return getExt(name);
- };
+ };
+ const shimClientWaitSync = () => glContext.CONDITION_SATISFIED;
- //
- // Manually make 'read' synchronous. glContext has a defined gl.fenceSync
- // function that throws a "Not implemented yet" exception so core
- // cannot properly detect that it is not supported. We mock
- // implementations of gl.fenceSync and gl.clientWaitSync
- // TODO remove once fenceSync and clientWaitSync is implemented upstream.
- //
- const shimFenceSync = () => {
- return {};
- };
- const shimClientWaitSync = () => glContext.CONDITION_SATISFIED;
+ // @ts-ignore
+ glContext.getExtension = shimGetExt.bind(glContext);
+ glContext.fenceSync = shimFenceSync.bind(glContext);
+ glContext.clientWaitSync = shimClientWaitSync.bind(glContext);
- // @ts-ignore
- glContext.getExtension = shimGetExt.bind(glContext);
- glContext.fenceSync = shimFenceSync.bind(glContext);
- glContext.clientWaitSync = shimClientWaitSync.bind(glContext);
+ // Set the WebGLContext before flag evaluation
+ setWebGLContext(2, glContext);
+ const context = new GPGPUContext();
+ const backend = new MathBackendWebGL(context);
- // Set the WebGLContext before flag evaluation
- setWebGLContext(2, glContext);
- const context = new GPGPUContext();
- const backend = new MathBackendWebGL(context);
-
- return backend;
- }, PRIORITY);
+ return backend;
+ },
+ PRIORITY
+ );
// Register all the webgl kernels on the rn-webgl backend
// TODO: Use tf.copyRegisteredKernels once synced to tfjs-core 2.5.0.
// tf.copyRegisteredKernels('webgl', 'rn-webgl');
- const kernels = tf.getKernelsForBackend('webgl');
- kernels.forEach(kernelConfig => {
- const newKernelConfig =
- Object.assign({}, kernelConfig, {backendName: 'rn-webgl'});
+ const kernels = tf.getKernelsForBackend("webgl");
+ kernels.forEach((kernelConfig) => {
+ const newKernelConfig = Object.assign({}, kernelConfig, {
+ backendName: "rn-webgl",
+ });
tf.registerKernel(newKernelConfig);
});
} catch (e) {
- throw (new Error(`Failed to register Webgl backend: ${e.message}`));
+ throw new Error(`Failed to register Webgl backend: ${e.message}`);
}
}
tf.env().registerFlag(
- 'IS_REACT_NATIVE', () => navigator && navigator.product === 'ReactNative');
+ "IS_REACT_NATIVE",
+ () => navigator && navigator.product === "ReactNative"
+);
-if (tf.env().getBool('IS_REACT_NATIVE')) {
+if (tf.env().getBool("IS_REACT_NATIVE")) {
setupGlobals();
registerWebGLBackend();
- tf.setPlatform('react-native', new PlatformReactNative());
+ tf.setPlatform("react-native", new PlatformReactNative());
}
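As a usage note, the exported fetch above can be called directly when loading binary assets; a hedged sketch with a placeholder URL:

```ts
import {fetch} from '@tensorflow/tfjs-react-native';

// isBinary routes the XHR through responseType = 'arraybuffer', as the
// implementation above explains.
async function loadBinary(url: string): Promise<ArrayBuffer> {
  const response = await fetch(url, {method: 'GET'}, {isBinary: true});
  return response.arrayBuffer();
}
```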
From 1bf2fca8df1725e804b08b3668c0375b090146d9 Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Wed, 17 Apr 2024 22:49:15 +0530
Subject: [PATCH 08/33] Update exception logic in convolutional.ts (#8248)
BUG
---
tfjs-layers/src/layers/convolutional.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tfjs-layers/src/layers/convolutional.ts b/tfjs-layers/src/layers/convolutional.ts
index c75240e1fd2..5207c1f88c4 100644
--- a/tfjs-layers/src/layers/convolutional.ts
+++ b/tfjs-layers/src/layers/convolutional.ts
@@ -105,7 +105,7 @@ export function conv1dWithBias(
if (bias != null && bias.shape.length !== 1) {
throw new ValueError(
`The bias for a conv1dWithBias operation should be 1, but is ` +
- `${kernel.shape.length} instead`);
+ `${bias.shape.length} instead`);
}
// TODO(cais): Support CAUSAL padding mode.
if (dataFormat === 'channelsFirst') {
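The one-line change above makes the error report the rank of the bias rather than the kernel. A standalone sketch of the corrected guard, with a plain Error standing in for tfjs-layers' ValueError:

```ts
import {Tensor} from '@tensorflow/tfjs-core';

function checkConv1dBias(bias: Tensor | null): void {
  if (bias != null && bias.shape.length !== 1) {
    // Post-fix: interpolate bias.shape.length, not kernel.shape.length.
    throw new Error(
      `The bias for a conv1dWithBias operation should be 1, but is ` +
      `${bias.shape.length} instead`
    );
  }
}
```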
From 5092b042516cdef5267668b46c154fd0476af6d7 Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Fri, 19 Apr 2024 04:32:45 +0530
Subject: [PATCH 09/33] Update installation steps for Windows / Mac OS X for
tfjs-node in README.md (#8101)
---
tfjs-node/README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tfjs-node/README.md b/tfjs-node/README.md
index 4edde2843fc..25a3cd3cc3f 100644
--- a/tfjs-node/README.md
+++ b/tfjs-node/README.md
@@ -39,9 +39,9 @@ npm install @tensorflow/tfjs-node-gpu
yarn add @tensorflow/tfjs-node-gpu
```
-#### Windows / Mac OS X Requires Python 2.7
+#### Windows / Mac OS X Requires a Supported Version of Python
-Windows & OSX build support for `node-gyp` requires Python 2.7. Be sure to have this version before installing `@tensorflow/tfjs-node` or `@tensorflow/tfjs-node-gpu`. Machines with Python 3.x will not install the bindings properly.
+Windows & OSX build support for `node-gyp` requires that you have installed a [supported version of Python](https://devguide.python.org/versions/#supported-versions). Be sure to have supported version of Python before installing `@tensorflow/tfjs-node` or `@tensorflow/tfjs-node-gpu`.
*For more troubleshooting on Windows, check out [WINDOWS_TROUBLESHOOTING.md](./WINDOWS_TROUBLESHOOTING.md).*
From 68c4de61d530ace87bb0318a4a4b3ca1888dadec Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Thu, 18 Apr 2024 16:19:35 -0700
Subject: [PATCH 10/33] Export WebGPUProgram as type to fix g3 isolatedModules
issue (#8252)
---
tfjs-backend-webgpu/src/webgpu.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tfjs-backend-webgpu/src/webgpu.ts b/tfjs-backend-webgpu/src/webgpu.ts
index 8c484e1b02c..835a4c86e4a 100644
--- a/tfjs-backend-webgpu/src/webgpu.ts
+++ b/tfjs-backend-webgpu/src/webgpu.ts
@@ -17,5 +17,5 @@
import * as webgpu_util from './webgpu_util';
export {WebGPUBackend} from './backend_webgpu';
-export {WebGPUProgram} from './webgpu_program';
+export type {WebGPUProgram} from './webgpu_program';
export {webgpu_util};
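Under `isolatedModules`, each file is transpiled in isolation, so the compiler cannot verify that a re-exported name is type-only; `export type` declares that explicitly and is erased from the emitted JavaScript. A minimal illustration:

```ts
// webgpu_program.ts
export interface WebGPUProgram {
  shaderKey: string;
}

// webgpu.ts, compiled with "isolatedModules": true
// export {WebGPUProgram} from './webgpu_program';    // error TS1205
export type {WebGPUProgram} from './webgpu_program';  // OK: erased on emit
```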
From 737b8f1052d37253693c63b7adc9b930c7a4502d Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Tue, 21 May 2024 22:39:24 +0530
Subject: [PATCH 11/33] Update tar package version for tfjs-node &
tfjs-node-gpu (#8280)
---
tfjs-node-gpu/package.json | 2 +-
tfjs-node/package.json | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/tfjs-node-gpu/package.json b/tfjs-node-gpu/package.json
index 111260824ed..e1773f308c4 100644
--- a/tfjs-node-gpu/package.json
+++ b/tfjs-node-gpu/package.json
@@ -70,7 +70,7 @@
"https-proxy-agent": "^2.2.1",
"progress": "^2.0.0",
"rimraf": "^2.6.2",
- "tar": "^4.4.6"
+ "tar": "^6.2.1"
},
"binary": {
"module_name": "tfjs_binding",
diff --git a/tfjs-node/package.json b/tfjs-node/package.json
index f0461648057..46d0c99e09a 100644
--- a/tfjs-node/package.json
+++ b/tfjs-node/package.json
@@ -68,7 +68,7 @@
"https-proxy-agent": "^2.2.1",
"progress": "^2.0.0",
"rimraf": "^2.6.2",
- "tar": "^4.4.6"
+ "tar": "^6.2.1"
},
"binary": {
"module_name": "tfjs_binding",
From fcad805d35a16efc4b9e42c9ef5950cf6c0693f2 Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Thu, 30 May 2024 05:45:25 +0530
Subject: [PATCH 12/33] Update supported TypedArray data types in tf.tensor in
the documentation (#8287)
* Update supported TypedArray data types in tf.tensor in the documentation
* Address Exceeds maximum line length of 80 tslint error
* Address Exceeds maximum line length of 80 tslint error again
* Address no-consecutive-blank-lines tslint error
---
tfjs-core/src/ops/tensor.ts | 47 +++++++++++++++++++------------------
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/tfjs-core/src/ops/tensor.ts b/tfjs-core/src/ops/tensor.ts
index 9b664e07a07..b472ebdc662 100644
--- a/tfjs-core/src/ops/tensor.ts
+++ b/tfjs-core/src/ops/tensor.ts
@@ -172,28 +172,29 @@ import {makeTensor} from './tensor_ops_util';
* await tf.setBackend(savedBackend);
* ```
* @param values The values of the tensor. Can be nested array of numbers,
- * or a flat array, or a `TypedArray`, or a `WebGLData` object, or a
- * `WebGPUData` object. If the values are strings, they will be encoded as utf-8
- * and kept as `Uint8Array[]`. If the values is a `WebGLData` object, the dtype
- * could only be 'float32' or 'int32' and the object has to have: 1. texture, a
- * `WebGLTexture`, the texture must share the same `WebGLRenderingContext` with
- * TFJS's WebGL backend (you could create a custom WebGL backend from your
- * texture's canvas) and the internal texture format for the input texture must
- * be floating point or normalized integer; 2. height, the height of the
- * texture; 3. width, the width of the texture; 4. channels, a non-empty subset
- * of 'RGBA', indicating the values of which channels will be passed to the
- * tensor, such as 'R' or 'BR' (The order of the channels affect the order of
- * tensor values. ). (If the values passed from texture is less than the tensor
- * size, zeros will be padded at the rear.). If the values is a `WebGPUData`
- * object, the dtype could only be 'float32' or 'int32 and the object has to
- * have: buffer, a `GPUBuffer`. The buffer must: 1. share the same `GPUDevice`
- * with TFJS's WebGPU backend; 2. buffer.usage should at least support
- * GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC; 3. buffer.size should not
- * be smaller than the byte size of tensor shape. WebGPUData optionally supports
- * zero copy by flag zeroCopy. When zeroCopy is false or undefined(default),
- * this passing GPUBuffer can be destroyed after tensor is created. When
- * zeroCopy is true, this GPUBuffer is bound directly by the tensor, so do not
- * destroy this GPUBuffer until all access is done.
+ * or a flat array, or a `TypedArray` (currently Uint8Array,
+ * Uint8ClampedArray, Int32Array, and Float32Array are supported), or a
+ * `WebGLData` object, or a `WebGPUData` object. If the values are strings,
+ * they will be encoded as utf-8 and kept as `Uint8Array[]`. If the values
+ * are a `WebGLData` object, the dtype can only be 'float32' or 'int32' and
+ * the object has to
+ * have: 1. texture, a `WebGLTexture`, the texture must share the same
+ * `WebGLRenderingContext` with TFJS's WebGL backend (you could create a custom
+ * WebGL backend from your texture's canvas) and the internal texture format
+ * for the input texture must be floating point or normalized integer; 2.
+ * height, the height of the texture; 3. width, the width of the texture; 4.
+ * channels, a non-empty subset of 'RGBA', indicating the values of which
+ * channels will be passed to the tensor, such as 'R' or 'BR' (the order of
+ * the channels affects the order of tensor values; if fewer values are
+ * passed from the texture than the tensor size, zeros are padded at the
+ * rear). If the values are a `WebGPUData` object, the dtype can only be
+ * 'float32' or 'int32' and the object has to have: buffer, a `GPUBuffer`.
+ * The buffer must: 1. share the same `GPUDevice` with TFJS's WebGPU backend;
+ * 2. buffer.usage should at least support GPUBufferUsage.STORAGE |
+ * GPUBufferUsage.COPY_SRC; 3. buffer.size should not be smaller than the
+ * byte size of the tensor shape. WebGPUData optionally supports zero copy
+ * via the zeroCopy flag. When zeroCopy is false or undefined (the default),
+ * the GPUBuffer passed in can be destroyed after the tensor is created.
+ * When zeroCopy is true, the GPUBuffer is bound directly by the tensor, so
+ * do not destroy it until all access is done.
* @param shape The shape of the tensor. Optional. If not provided,
* it is inferred from `values`.
* @param dtype The data type.
@@ -205,4 +206,4 @@ export function tensor(
dtype?: DataType): Tensor {
const inferredShape = inferShape(values, dtype);
return makeTensor(values, shape, inferredShape, dtype) as Tensor;
-}
+}
\ No newline at end of file
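For the common TypedArray path described in the updated comment, a short example:

```ts
import * as tf from '@tensorflow/tfjs-core';

// Any of the TypedArray types listed above works as `values`.
const a = tf.tensor(new Float32Array([1, 2, 3, 4]), [2, 2]);  // explicit shape
const b = tf.tensor(new Int32Array([1, 2, 3]));               // shape inferred
a.print();  // [[1, 2], [3, 4]]
b.print();  // [1, 2, 3]
```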
From 97f1531a096900a1af5b9070b36a54cedc3e1125 Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Tue, 6 Aug 2024 13:15:19 -0400
Subject: [PATCH 13/33] Fix iOS nightly device deprecated by updating device
(#8348)
---
tfjs-layers/BUILD.bazel | 2 +-
tools/karma_template.conf.js | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/tfjs-layers/BUILD.bazel b/tfjs-layers/BUILD.bazel
index cb1a4e602c2..88f1358c418 100644
--- a/tfjs-layers/BUILD.bazel
+++ b/tfjs-layers/BUILD.bazel
@@ -55,7 +55,7 @@ tfjs_web_test(
# disabled android test due to training flakiness
# "bs_android_10",
"win_10_chrome",
- "bs_ios_15",
+ "bs_ios_17",
],
headless = False,
seed = "12345",
diff --git a/tools/karma_template.conf.js b/tools/karma_template.conf.js
index d6bc26ec529..1e90d9bcba4 100644
--- a/tools/karma_template.conf.js
+++ b/tools/karma_template.conf.js
@@ -62,11 +62,11 @@ const CUSTOM_LAUNCHERS = {
os_version: '12.3',
real_mobile: true
},
- bs_ios_15: {
+ bs_ios_17: {
base: 'BrowserStack',
- device: 'iPhone 11 Pro',
+ device: 'iPhone 15 Pro Max',
os: 'ios',
- os_version: '15',
+ os_version: '17',
real_mobile: true
},
bs_android_10: {
From 0677375de6596b33ab81ea78eca736f0072afb47 Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Thu, 22 Aug 2024 00:22:32 +0530
Subject: [PATCH 14/33] Update broken link for tf.keras SavedModel in README.md
(#8313)
---
tfjs-converter/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tfjs-converter/README.md b/tfjs-converter/README.md
index 6b4d69e8196..3a8821cb978 100644
--- a/tfjs-converter/README.md
+++ b/tfjs-converter/README.md
@@ -17,7 +17,7 @@ using an already hosted model (e.g. MobileNet), skip this step.
2. [JavaScript API](./src/executor/graph_model.ts), for loading and running
inference.
-## Step 1: Converting a [TensorFlow SavedModel](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md), [TensorFlow Hub module](https://www.tensorflow.org/hub/), [Keras HDF5](https://keras.io/getting_started/faq/#what-are-my-options-for-saving-models), [tf.keras SavedModel](https://www.tensorflow.org/api_docs/python/tf/keras/saving/save_model), or [Flax/JAX model](http://github.com/google/flax) to a web-friendly format
+## Step 1: Converting a [TensorFlow SavedModel](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md), [TensorFlow Hub module](https://www.tensorflow.org/hub/), [Keras HDF5](https://keras.io/getting_started/faq/#what-are-my-options-for-saving-models), [tf.keras SavedModel](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model), or [Flax/JAX model](http://github.com/google/flax) to a web-friendly format
__0. Please make sure that you run in a Docker container or a virtual environment.__
From 936b448c209fa683beef2dfc3d7ead2c0ecb35e9 Mon Sep 17 00:00:00 2001
From: lukonik <81145822+lukonik@users.noreply.github.com>
Date: Thu, 22 Aug 2024 20:00:22 +0400
Subject: [PATCH 15/33] Subject: Add R2Score metric. (#8169) (#8353)
Body:
FEATURE
Co-authored-by: Matthew Soulanille
---
tfjs-layers/src/exports_metrics.ts | 19 +++++++++++++++++++
tfjs-layers/src/metrics.ts | 12 +++++++++---
tfjs-layers/src/metrics_test.ts | 23 ++++++++++++++++++++++-
3 files changed, 50 insertions(+), 4 deletions(-)
diff --git a/tfjs-layers/src/exports_metrics.ts b/tfjs-layers/src/exports_metrics.ts
index dd6472c34f3..84ffc6e220d 100644
--- a/tfjs-layers/src/exports_metrics.ts
+++ b/tfjs-layers/src/exports_metrics.ts
@@ -314,3 +314,22 @@ export function MSE(yTrue: Tensor, yPred: Tensor): Tensor {
export function mse(yTrue: Tensor, yPred: Tensor): Tensor {
return losses.meanSquaredError(yTrue, yPred);
}
+
+/**
+ * Computes R2 score.
+ *
+ * ```js
+ * const yTrue = tf.tensor2d([[0, 1], [3, 4]]);
+ * const yPred = tf.tensor2d([[0, 1], [-3, -4]]);
+ * const r2Score = tf.metrics.r2Score(yTrue, yPred);
+ * r2Score.print();
+ * ```
+ * @param yTrue Truth Tensor.
+ * @param yPred Prediction Tensor.
+ * @return R2 score Tensor.
+ *
+ * @doc {heading: 'Metrics', namespace: 'metrics'}
+ */
+export function r2Score(yTrue: Tensor, yPred: Tensor): Tensor {
+ return metrics.r2Score(yTrue, yPred);
+}
diff --git a/tfjs-layers/src/metrics.ts b/tfjs-layers/src/metrics.ts
index a8080d8bf89..7c0f52d41a2 100644
--- a/tfjs-layers/src/metrics.ts
+++ b/tfjs-layers/src/metrics.ts
@@ -17,9 +17,7 @@ import {Tensor, tidy} from '@tensorflow/tfjs-core';
import * as K from './backend/tfjs_backend';
import {NotImplementedError, ValueError} from './errors';
-import {categoricalCrossentropy as categoricalCrossentropyLoss, cosineProximity, meanAbsoluteError, meanAbsolutePercentageError, meanSquaredError, sparseCategoricalCrossentropy as sparseCategoricalCrossentropyLoss} from './losses';
-import {binaryCrossentropy as lossBinaryCrossentropy} from './losses';
-import {lossesMap} from './losses';
+import {binaryCrossentropy as lossBinaryCrossentropy, categoricalCrossentropy as categoricalCrossentropyLoss, cosineProximity, lossesMap, meanAbsoluteError, meanAbsolutePercentageError, meanSquaredError, sparseCategoricalCrossentropy as sparseCategoricalCrossentropyLoss} from './losses';
import {LossOrMetricFn} from './types';
import * as util from './utils/generic_utils';
@@ -112,6 +110,14 @@ export function sparseTopKCategoricalAccuracy(
throw new NotImplementedError();
}
+export function r2Score(yTrue: Tensor, yPred: Tensor): Tensor {
+ return tidy(() => {
+ const sumSquaresResiduals = yTrue.sub(yPred).square().sum();
+ const sumSquares = yTrue.sub(yTrue.mean()).square().sum();
+ return tfc.scalar(1).sub(sumSquaresResiduals.div(sumSquares));
+ });
+}
+
// Aliases.
export const mse = meanSquaredError;
export const MSE = meanSquaredError;
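The `r2Score` implementation above computes the coefficient of determination, R² = 1 − SS_res / SS_tot. As a sanity check, here is a minimal dependency-free sketch (plain TypeScript; the `r2` helper is hypothetical, not tfjs API) that reproduces the expectation of the 1D test added below:

```ts
// R^2 = 1 - SS_res / SS_tot, the same quantity metrics.r2Score computes.
function r2(yTrue: number[], yPred: number[]): number {
  const ssRes = yTrue.reduce((s, y, i) => s + (y - yPred[i]) ** 2, 0);
  const mean = yTrue.reduce((s, y) => s + y, 0) / yTrue.length;
  const ssTot = yTrue.reduce((s, y) => s + (y - mean) ** 2, 0);
  return 1 - ssRes / ssTot;
}

const yTrue = [3, -0.5, 2, 7, 4.2, 8.5, 1.3, 2.8, 6.7, 9.0];
const yPred = [2.5, 0.0, 2.1, 7.8, 4.0, 8.2, 1.4, 2.9, 6.5, 9.1];
console.log(r2(yTrue, yPred)); // ~0.9856, i.e. scalar(0.985) within test tolerance
```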
diff --git a/tfjs-layers/src/metrics_test.ts b/tfjs-layers/src/metrics_test.ts
index 3bcb3308bca..e34852d9f7d 100644
--- a/tfjs-layers/src/metrics_test.ts
+++ b/tfjs-layers/src/metrics_test.ts
@@ -16,7 +16,7 @@ import {scalar, Tensor, tensor, tensor1d, tensor2d} from '@tensorflow/tfjs-core'
import {setEpsilon} from './backend/common';
import * as tfl from './index';
-import {binaryAccuracy, categoricalAccuracy, get, getLossOrMetricName} from './metrics';
+import {binaryAccuracy, categoricalAccuracy, get, getLossOrMetricName, r2Score} from './metrics';
import {LossOrMetricFn} from './types';
import {describeMathCPUAndGPU, describeMathCPUAndWebGL2, expectTensorsClose} from './utils/test_utils';
@@ -283,6 +283,27 @@ describeMathCPUAndGPU('recall metric', () => {
});
});
+describeMathCPUAndGPU('r2Score', () => {
+ it('1D', () => {
+ const yTrue = tensor1d([3, -0.5, 2, 7, 4.2, 8.5, 1.3, 2.8, 6.7, 9.0]);
+ const yPred = tensor1d([2.5, 0.0, 2.1, 7.8, 4.0, 8.2, 1.4, 2.9, 6.5, 9.1]);
+ const score = r2Score(yTrue, yPred);
+ expectTensorsClose(score, scalar(0.985));
+ });
+ it('2D', () => {
+ const yTrue = tensor2d([
+ [3, 2.5], [-0.5, 3.2], [2, 1.9], [7, 5.1], [4.2, 3.8], [8.5, 7.4],
+ [1.3, 0.6], [2.8, 2.1], [6.7, 5.3], [9.0, 8.7]
+ ]);
+ const yPred = tensor2d([
+ [2.7, 2.3], [0.0, 3.1], [2.1, 1.8], [6.8, 5.0], [4.1, 3.7], [8.4, 7.2],
+ [1.4, 0.7], [2.9, 2.2], [6.6, 5.2], [9.2, 8.9]
+ ]);
+ const score = r2Score(yTrue, yPred);
+ expectTensorsClose(score, scalar(0.995));
+ });
+});
+
describe('metrics.get', () => {
it('valid name, not alias', () => {
expect(get('binaryAccuracy') === get('categoricalAccuracy')).toEqual(false);
From 3daf152cb794f4da58fce5e21e09e8a4f89c8f80 Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Fri, 23 Aug 2024 17:29:08 -0400
Subject: [PATCH 16/33] Quote props of DataType enum to prevent name mangling
(#8361)
BUG
These properties must be quoted since they are used by parseDtypeParam in tfjs-converter/src/operations/operation_mapper.ts to look up dtypes by string name. If they are not quoted, Closure will mangle their names.
---
tfjs-converter/src/data/compiled_api.ts | 98 +++++++++++++------------
1 file changed, 51 insertions(+), 47 deletions(-)
diff --git a/tfjs-converter/src/data/compiled_api.ts b/tfjs-converter/src/data/compiled_api.ts
index d5d0b4f1786..4fa5fe885b9 100644
--- a/tfjs-converter/src/data/compiled_api.ts
+++ b/tfjs-converter/src/data/compiled_api.ts
@@ -29,61 +29,65 @@ export declare interface IAny {
/** DataType enum. */
export enum DataType {
+ // These properties must be quoted since they are used by parseDtypeParam
+ // in tfjs-converter/src/operations/operation_mapper.ts to look up dtypes
+ // by string name. If they are not quoted, Closure will mangle their names.
+
// Not a legal value for DataType. Used to indicate a DataType field
// has not been set.
- DT_INVALID = 0,
+ 'DT_INVALID' = 0,
// Data types that all computation devices are expected to be
// capable to support.
- DT_FLOAT = 1,
- DT_DOUBLE = 2,
- DT_INT32 = 3,
- DT_UINT8 = 4,
- DT_INT16 = 5,
- DT_INT8 = 6,
- DT_STRING = 7,
- DT_COMPLEX64 = 8, // Single-precision complex
- DT_INT64 = 9,
- DT_BOOL = 10,
- DT_QINT8 = 11, // Quantized int8
- DT_QUINT8 = 12, // Quantized uint8
- DT_QINT32 = 13, // Quantized int32
- DT_BFLOAT16 = 14, // Float32 truncated to 16 bits. Only for cast ops.
- DT_QINT16 = 15, // Quantized int16
- DT_QUINT16 = 16, // Quantized uint16
- DT_UINT16 = 17,
- DT_COMPLEX128 = 18, // Double-precision complex
- DT_HALF = 19,
- DT_RESOURCE = 20,
- DT_VARIANT = 21, // Arbitrary C++ data types
- DT_UINT32 = 22,
- DT_UINT64 = 23,
+ 'DT_FLOAT' = 1,
+ 'DT_DOUBLE' = 2,
+ 'DT_INT32' = 3,
+ 'DT_UINT8' = 4,
+ 'DT_INT16' = 5,
+ 'DT_INT8' = 6,
+ 'DT_STRING' = 7,
+ 'DT_COMPLEX64' = 8, // Single-precision complex
+ 'DT_INT64' = 9,
+ 'DT_BOOL' = 10,
+ 'DT_QINT8' = 11, // Quantized int8
+ 'DT_QUINT8' = 12, // Quantized uint8
+ 'DT_QINT32' = 13, // Quantized int32
+ 'DT_BFLOAT16' = 14, // Float32 truncated to 16 bits. Only for cast ops.
+ 'DT_QINT16' = 15, // Quantized int16
+ 'DT_QUINT16' = 16, // Quantized uint16
+ 'DT_UINT16' = 17,
+ 'DT_COMPLEX128' = 18, // Double-precision complex
+ 'DT_HALF' = 19,
+ 'DT_RESOURCE' = 20,
+ 'DT_VARIANT' = 21, // Arbitrary C++ data types
+ 'DT_UINT32' = 22,
+ 'DT_UINT64' = 23,
// Do not use! These are only for parameters. Every enum above
// should have a corresponding value below (verified by types_test).
- DT_FLOAT_REF = 101,
- DT_DOUBLE_REF = 102,
- DT_INT32_REF = 103,
- DT_UINT8_REF = 104,
- DT_INT16_REF = 105,
- DT_INT8_REF = 106,
- DT_STRING_REF = 107,
- DT_COMPLEX64_REF = 108,
- DT_INT64_REF = 109,
- DT_BOOL_REF = 110,
- DT_QINT8_REF = 111,
- DT_QUINT8_REF = 112,
- DT_QINT32_REF = 113,
- DT_BFLOAT16_REF = 114,
- DT_QINT16_REF = 115,
- DT_QUINT16_REF = 116,
- DT_UINT16_REF = 117,
- DT_COMPLEX128_REF = 118,
- DT_HALF_REF = 119,
- DT_RESOURCE_REF = 120,
- DT_VARIANT_REF = 121,
- DT_UINT32_REF = 122,
- DT_UINT64_REF = 123,
+ 'DT_FLOAT_REF' = 101,
+ 'DT_DOUBLE_REF' = 102,
+ 'DT_INT32_REF' = 103,
+ 'DT_UINT8_REF' = 104,
+ 'DT_INT16_REF' = 105,
+ 'DT_INT8_REF' = 106,
+ 'DT_STRING_REF' = 107,
+ 'DT_COMPLEX64_REF' = 108,
+ 'DT_INT64_REF' = 109,
+ 'DT_BOOL_REF' = 110,
+ 'DT_QINT8_REF' = 111,
+ 'DT_QUINT8_REF' = 112,
+ 'DT_QINT32_REF' = 113,
+ 'DT_BFLOAT16_REF' = 114,
+ 'DT_QINT16_REF' = 115,
+ 'DT_QUINT16_REF' = 116,
+ 'DT_UINT16_REF' = 117,
+ 'DT_COMPLEX128_REF' = 118,
+ 'DT_HALF_REF' = 119,
+ 'DT_RESOURCE_REF' = 120,
+ 'DT_VARIANT_REF' = 121,
+ 'DT_UINT32_REF' = 122,
+ 'DT_UINT64_REF' = 123,
}
/** Properties of a TensorShape. */
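To illustrate the bug this patch prevents: the converter resolves dtypes by string key at runtime, so the enum keys must survive Closure's property renaming. A simplified sketch of the lookup (the `parseDtype` helper is a stand-in for `parseDtypeParam`, not the actual implementation):

```ts
enum DataType { 'DT_INVALID' = 0, 'DT_FLOAT' = 1, 'DT_INT32' = 3 }

// The serialized GraphDef carries dtypes as strings such as 'DT_FLOAT'.
// Quoted keys are treated as string properties and never renamed by
// Closure; unquoted keys can be mangled, making this lookup fail.
function parseDtype(name: string): DataType {
  return DataType[name as keyof typeof DataType];
}

console.log(parseDtype('DT_FLOAT')); // 1
```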
From 636c616d148b4235a9cd51b92683e06bb4e02c8b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Val=C3=A9rian=20Rousset?=
Date: Tue, 8 Oct 2024 20:35:37 +0200
Subject: [PATCH 17/33] [tfjs-data] support async generator (#8408)
---
tfjs-data/src/readers.ts | 19 +++++++++----------
tfjs-data/src/readers_test.ts | 15 +++++++++++++++
2 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/tfjs-data/src/readers.ts b/tfjs-data/src/readers.ts
index ed93a55577c..070874c4f5a 100644
--- a/tfjs-data/src/readers.ts
+++ b/tfjs-data/src/readers.ts
@@ -140,14 +140,12 @@ export function func(
/**
* Create a `Dataset` that produces each element from provided JavaScript
- * generator, which is a function*
- * (https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Iterators_and_Generators#Generator_functions),
- * or a function that returns an
- * iterator
- * (https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Iterators_and_Generators#Generator_functions).
+ * generator, which is a function that returns a (potentially async) iterator.
*
- * The returned iterator should have `.next()` function that returns element in
- * format of `{value: TensorContainer, done:boolean}`.
+ * For more information on iterators and generators, see
+ * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Iterators_and_Generators .
+ * For the iterator protocol, see
+ * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols .
*
* Example of creating a dataset from an iterator factory:
* ```js
@@ -188,8 +186,8 @@ export function func(
* await ds.forEachAsync(e => console.log(e));
* ```
*
- * @param generator A JavaScript generator function that returns a JavaScript
- * iterator.
+ * @param generator A JavaScript function that returns
+ * a (potentially async) JavaScript iterator.
*
* @doc {
* heading: 'Data',
@@ -199,7 +197,8 @@ export function func(
* }
*/
export function generator<T extends tf.TensorContainer>(
- generator: () => Iterator<T>| Promise<Iterator<T>>): Dataset<T> {
+ generator: () => Iterator<T> | Promise<Iterator<T>> | AsyncIterator<T>,
+): Dataset<T> {
return datasetFromIteratorFn(async () => {
const gen = await generator();
return iteratorFromFunction(() => gen.next());
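With the widened signature, an async generator function can now be passed directly, since calling it returns an `AsyncIterator`. A short usage sketch, mirroring the test added below:

```ts
import * as tfd from '@tensorflow/tfjs-data';

// Each yielded value could just as well be awaited, e.g. a fetch result.
async function* dataGenerator() {
  for (let i = 0; i < 5; i++) {
    yield i;
  }
}

async function main() {
  const ds = tfd.generator(dataGenerator);
  await ds.forEachAsync(e => console.log(e)); // 0 1 2 3 4
}
main();
```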
diff --git a/tfjs-data/src/readers_test.ts b/tfjs-data/src/readers_test.ts
index 740ef2d6cbb..4a4bdb6306c 100644
--- a/tfjs-data/src/readers_test.ts
+++ b/tfjs-data/src/readers_test.ts
@@ -45,6 +45,21 @@ describeAllEnvs('readers', () => {
expect(result).toEqual([0, 1, 2, 3, 4]);
});
+ it('generate dataset from async generator', async () => {
+ async function* dataGenerator() {
+ const numElements = 5;
+ let index = 0;
+ while (index < numElements) {
+ const x = index;
+ index++;
+ yield x;
+ }
+ }
+ const ds = tfd.generator(dataGenerator);
+ const result = await ds.toArrayForTest();
+ expect(result).toEqual([0, 1, 2, 3, 4]);
+ });
+
it('generate multiple datasets from JavaScript generator', async () => {
function* dataGenerator() {
const numElements = 5;
From 15c00f8948534b8c2a3a59113872c0206e06dd2f Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Fri, 11 Oct 2024 03:33:13 +0530
Subject: [PATCH 18/33] Fix typos in the documentation strings of the
tfjs-layers directory (#8411)
---
tfjs-layers/README.md | 2 +-
tfjs-layers/demos/README.md | 2 +-
.../src/layers/nlp/modeling/transformer_decoder_test.ts | 4 ++--
tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts | 2 +-
tfjs-layers/src/layers/nlp/multihead_attention.ts | 2 +-
tfjs-layers/src/layers/normalization.ts | 2 +-
tfjs-layers/src/layers/normalization_test.ts | 4 ++--
.../src/layers/preprocessing/image_resizing_test.ts | 2 +-
tfjs-layers/src/layers/preprocessing/random_height.ts | 4 ++--
tfjs-layers/src/layers/preprocessing/random_width.ts | 4 ++--
tfjs-layers/src/layers/recurrent.ts | 8 ++++----
11 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/tfjs-layers/README.md b/tfjs-layers/README.md
index cd462791eb9..1fd735fce4c 100644
--- a/tfjs-layers/README.md
+++ b/tfjs-layers/README.md
@@ -52,7 +52,7 @@ const ys = tf.tensor2d([[1], [3], [5], [7]], [4, 1]);
// Train the model.
await model.fit(xs, ys, {epochs: 500});
-// Ater the training, perform inference.
+// After the training, perform inference.
const output = model.predict(tf.tensor2d([[5]], [1, 1]));
output.print();
```
diff --git a/tfjs-layers/demos/README.md b/tfjs-layers/demos/README.md
index 3930909f743..71385b3818e 100644
--- a/tfjs-layers/demos/README.md
+++ b/tfjs-layers/demos/README.md
@@ -16,7 +16,7 @@ Once the development environment is prepared, execute the build script from the
```
The script will construct a number of Keras models in Python and benchmark their training using the TensorFlow backend. When it is complete, it will bring up a
-local HTTP server. Navigate to the local URL spcecified in stdout to bring up
+local HTTP server. Navigate to the local URL specified in stdout to bring up
the benchmarks page UI. There will be a button to begin the JS side of the
benchmarks. Clicking the button will run through and time the same models, now
running in the browser.
diff --git a/tfjs-layers/src/layers/nlp/modeling/transformer_decoder_test.ts b/tfjs-layers/src/layers/nlp/modeling/transformer_decoder_test.ts
index a2a03045b54..b0b0aa15bff 100644
--- a/tfjs-layers/src/layers/nlp/modeling/transformer_decoder_test.ts
+++ b/tfjs-layers/src/layers/nlp/modeling/transformer_decoder_test.ts
@@ -106,7 +106,7 @@ describe('TransformerDecoder', () => {
const config = testLayer.getConfig();
const restored = TransformerDecoder.fromConfig(TransformerDecoder, config);
- // Initializers don't get serailized with customObjects.
+ // Initializers don't get serialized with customObjects.
delete ((config['kernelInitializer'] as serialization.ConfigDict
)['config'] as serialization.ConfigDict)['customObjects'];
delete ((config['biasInitializer'] as serialization.ConfigDict
@@ -167,5 +167,5 @@ describe('TransformerDecoder', () => {
expectTensorsClose(outputCache, noLoopCache);
});
- // TODO(pforderique): Test mask propogation once supported.
+ // TODO(pforderique): Test mask propagation once supported.
});
diff --git a/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts b/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts
index b859e16e27f..f96b37f5d7f 100644
--- a/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts
+++ b/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts
@@ -70,7 +70,7 @@ export declare interface GPT2CausalLMArgs extends PipelineModelArgs {
}
/**
- * An end-to-end GPT2 model for causal langauge modeling.
+ * An end-to-end GPT2 model for causal language modeling.
*
* A causal language model (LM) predicts the next token based on previous
* tokens. This task setup can be used to train the model unsupervised on
diff --git a/tfjs-layers/src/layers/nlp/multihead_attention.ts b/tfjs-layers/src/layers/nlp/multihead_attention.ts
index 46253c117c3..8c9df5ea27c 100644
--- a/tfjs-layers/src/layers/nlp/multihead_attention.ts
+++ b/tfjs-layers/src/layers/nlp/multihead_attention.ts
@@ -703,7 +703,7 @@ export class MultiHeadAttention extends Layer {
newInputs = [inputs, kwargs['value']].concat(kwargs['key'] ?? []);
- // TODO(pforderique): Support mask propogation.
+ // TODO(pforderique): Support mask propagation.
return super.apply(newInputs, kwargs);
}
diff --git a/tfjs-layers/src/layers/normalization.ts b/tfjs-layers/src/layers/normalization.ts
index 03193c83af0..1e4f8c56b8c 100644
--- a/tfjs-layers/src/layers/normalization.ts
+++ b/tfjs-layers/src/layers/normalization.ts
@@ -430,7 +430,7 @@ export interface LayerNormalizationLayerArgs extends LayerArgs {
axis?: number|number[];
/**
- * A small positive float added to variance to avoid divison by zero.
+ * A small positive float added to variance to avoid division by zero.
* Defaults to 1e-3.
*/
epsilon?: number;
diff --git a/tfjs-layers/src/layers/normalization_test.ts b/tfjs-layers/src/layers/normalization_test.ts
index b7e3db11c6c..ad02fcf8fb4 100644
--- a/tfjs-layers/src/layers/normalization_test.ts
+++ b/tfjs-layers/src/layers/normalization_test.ts
@@ -353,7 +353,7 @@ describeMathCPUAndWebGL2('BatchNormalization Layers: Tensor', () => {
const x = tensor2d([[1, 2], [3, 4]], [2, 2]);
expectTensorsClose(layer.apply(x) as Tensor, x, 0.01);
expect(layer.getWeights().length).toEqual(3);
- // Firt weight is gamma.
+ // First weight is gamma.
expectTensorsClose(layer.getWeights()[0], onesLike(layer.getWeights()[0]));
// Second weight is moving mean.
expectTensorsClose(layer.getWeights()[1], zerosLike(layer.getWeights()[1]));
@@ -366,7 +366,7 @@ describeMathCPUAndWebGL2('BatchNormalization Layers: Tensor', () => {
const x = tensor2d([[1, 2], [3, 4]], [2, 2]);
expectTensorsClose(layer.apply(x) as Tensor, x, 0.01);
expect(layer.getWeights().length).toEqual(3);
- // Firt weight is beta.
+ // First weight is beta.
expectTensorsClose(layer.getWeights()[0], zerosLike(layer.getWeights()[0]));
// Second weight is moving mean.
expectTensorsClose(layer.getWeights()[1], zerosLike(layer.getWeights()[1]));
diff --git a/tfjs-layers/src/layers/preprocessing/image_resizing_test.ts b/tfjs-layers/src/layers/preprocessing/image_resizing_test.ts
index 9afc0b7f85d..85089e771f7 100644
--- a/tfjs-layers/src/layers/preprocessing/image_resizing_test.ts
+++ b/tfjs-layers/src/layers/preprocessing/image_resizing_test.ts
@@ -88,7 +88,7 @@ describeMathCPUAndGPU('Resizing Layer', () => {
});
it('Returns a tensor of the correct dtype', () => {
- // do a same resizing operation, cheeck tensors dtypes and content
+ // do a same resizing operation, check tensors dtypes and content
const height = 40;
const width = 60;
const numChannels = 3;
diff --git a/tfjs-layers/src/layers/preprocessing/random_height.ts b/tfjs-layers/src/layers/preprocessing/random_height.ts
index fa21f371e53..728d58c4bad 100644
--- a/tfjs-layers/src/layers/preprocessing/random_height.ts
+++ b/tfjs-layers/src/layers/preprocessing/random_height.ts
@@ -35,7 +35,7 @@ type InterpolationType = typeof INTERPOLATION_KEYS[number];
*
* The input should be a 3D (unbatched) or
* 4D (batched) tensor in the `"channels_last"` image data format. Input pixel
- * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of interger
+ * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer
* or floating point dtype. By default, the layer will output floats.
*
* tf methods implemented in tfjs: 'bilinear', 'nearest',
@@ -48,7 +48,7 @@ export class RandomHeight extends BaseRandomLayer {
/** @nocollapse */
static override className = 'RandomHeight';
private readonly factor: number | [number, number];
- private readonly interpolation?: InterpolationType; // defualt = 'bilinear
+ private readonly interpolation?: InterpolationType; // default = 'bilinear'
private heightLower: number;
private heightUpper: number;
private imgWidth: number;
diff --git a/tfjs-layers/src/layers/preprocessing/random_width.ts b/tfjs-layers/src/layers/preprocessing/random_width.ts
index 60b71490e6e..c969bd9c505 100644
--- a/tfjs-layers/src/layers/preprocessing/random_width.ts
+++ b/tfjs-layers/src/layers/preprocessing/random_width.ts
@@ -35,7 +35,7 @@ type InterpolationType = typeof INTERPOLATION_KEYS[number];
*
* The input should be a 3D (unbatched) or
* 4D (batched) tensor in the `"channels_last"` image data format. Input pixel
- * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of interger
+ * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer
* or floating point dtype. By default, the layer will output floats.
*
* tf methods implemented in tfjs: 'bilinear', 'nearest',
@@ -48,7 +48,7 @@ export class RandomWidth extends BaseRandomLayer {
/** @nocollapse */
static override className = 'RandomWidth';
private readonly factor: number | [number, number];
- private readonly interpolation?: InterpolationType; // defualt = 'bilinear
+ private readonly interpolation?: InterpolationType; // default = 'bilinear'
private widthLower: number;
private widthUpper: number;
private imgHeight: number;
diff --git a/tfjs-layers/src/layers/recurrent.ts b/tfjs-layers/src/layers/recurrent.ts
index b2af94772ed..95153ca239c 100644
--- a/tfjs-layers/src/layers/recurrent.ts
+++ b/tfjs-layers/src/layers/recurrent.ts
@@ -252,7 +252,7 @@ export declare interface BaseRNNLayerArgs extends LayerArgs {
* see section "Note on passing external constants" below.
* Porting Note: PyKeras overrides the `call()` signature of RNN cells,
* which are Layer subtypes, to accept two arguments. tfjs-layers does
- * not do such overriding. Instead we preseve the `call()` signature,
+ * not do such overriding. Instead we preserve the `call()` signature,
* which due to its `Tensor|Tensor[]` argument and return value is
* flexible enough to handle the inputs and states.
* - a `stateSize` attribute. This can be a single integer (single state)
@@ -757,7 +757,7 @@ export class RNN extends Layer {
const output = this.returnSequences ? outputs : lastOutput;
- // TODO(cais): Porperty set learning phase flag.
+ // TODO(cais): Properly set learning phase flag.
if (this.returnState) {
return [output].concat(states);
@@ -1933,7 +1933,7 @@ export class StackedRNNCells extends RNNCell {
get stateSize(): number[] {
// States are a flat list in reverse order of the cell stack.
- // This allows perserving the requirement `stack.statesize[0] ===
+ // This allows preserving the requirement `stack.stateSize[0] ===
// outputDim`. E.g., states of a 2-layer LSTM would be `[h2, c2, h1, c1]`,
// assuming one LSTM has states `[h, c]`.
const stateSize: number[] = [];
@@ -2098,7 +2098,7 @@ export class StackedRNNCells extends RNNCell {
batchSetValue(tuples);
}
- // TODO(cais): Maybe implemnt `losses` and `getLossesFor`.
+ // TODO(cais): Maybe implement `losses` and `getLossesFor`.
}
serialization.registerClass(StackedRNNCells);
From 8206e95ff5b6da2e2d69f950b35082e488783018 Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Fri, 11 Oct 2024 03:50:47 +0530
Subject: [PATCH 19/33] Fix typos in the documentation strings of the tfjs-node
directory (#8412)
---
tfjs-node/WINDOWS_TROUBLESHOOTING.md | 6 +++---
tfjs-node/binding/tfjs_backend.cc | 4 ++--
tfjs-node/binding/tfjs_backend.h | 2 +-
tfjs-node/scripts/make-version | 2 +-
tfjs-node/src/callbacks.ts | 4 ++--
tfjs-node/src/io/file_system_test.ts | 2 +-
6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/tfjs-node/WINDOWS_TROUBLESHOOTING.md b/tfjs-node/WINDOWS_TROUBLESHOOTING.md
index 3e7a36aa7ee..c20b72df52c 100644
--- a/tfjs-node/WINDOWS_TROUBLESHOOTING.md
+++ b/tfjs-node/WINDOWS_TROUBLESHOOTING.md
@@ -7,7 +7,7 @@ The tfjs-node package uses the [node-gyp](https://github.com/nodejs/node-gyp) pa
This can happen for a variety of reasons. First, to inspect what is missing either `cd node_modules/@tensorflow/tfjs-node` or clone the [tensorflow/tfjs repo](https://github.com/tensorflow/tfjs).
-After `cd`'ing or cloning, run the following command (you might need node-gyp installed globablly `npm install -g node-gyp`):
+After `cd`'ing or cloning, run the following command (you might need node-gyp installed globally `npm install -g node-gyp`):
```sh
node-gyp configure --verbose
@@ -22,9 +22,9 @@ gyp verb check python checking for Python executable "python2" in the PATH
gyp verb `which` failed Error: not found: python2
```
-This means that node-gyp expects a 'python2' exe somewhere in `%PATH%`. Try running this command from an Admin (elevated privilaged prompt):
+This means that node-gyp expects a 'python2' exe somewhere in `%PATH%`. Try running this command from an Admin (elevated privilege) prompt:
-You can try running this from an Adminstrative prompt:
+You can try running this from an Administrative prompt:
```sh
$ npm --add-python-to-path='true' --debug install --global windows-build-tools
diff --git a/tfjs-node/binding/tfjs_backend.cc b/tfjs-node/binding/tfjs_backend.cc
index f17efd2df23..4267cf5b960 100644
--- a/tfjs-node/binding/tfjs_backend.cc
+++ b/tfjs-node/binding/tfjs_backend.cc
@@ -135,7 +135,7 @@ TFE_TensorHandle *CreateTFE_TensorHandleFromTypedArray(napi_env env,
if (dtype == TF_INT64) {
// Currently, int64-type Tensors are represented as Int32Arrays.
// To represent an int64-type Tensor of `n` elements, an Int32Array of
- // length `2 * n` is requried. This is why the length-match checking
+ // length `2 * n` is required. This is why the length-match checking
// logic is special-cased for int64.
if (array_length != num_elements * 2) {
NAPI_THROW_ERROR(
@@ -379,7 +379,7 @@ void CopyTFE_TensorHandleDataToResourceArray(
TF_AutoStatus status;
- // Create a JS string to stash the resouce handle into.
+ // Create a JS string to stash the resource handle into.
napi_status nstatus;
size_t byte_length = TF_TensorByteSize(tensor.tensor);
nstatus = napi_create_array_with_length(env, byte_length, result);
diff --git a/tfjs-node/binding/tfjs_backend.h b/tfjs-node/binding/tfjs_backend.h
index ca85bc2116c..fee71d06074 100644
--- a/tfjs-node/binding/tfjs_backend.h
+++ b/tfjs-node/binding/tfjs_backend.h
@@ -36,7 +36,7 @@ class TFJSBackend {
static TFJSBackend *Create(napi_env env);
// Creates a new Tensor with given shape and data and returns an ID that
- // refernces the new Tensor.
+ // references the new Tensor.
// - shape_value (number[])
// - dtype_value (number)
// - array_value (TypedArray|Array)
diff --git a/tfjs-node/scripts/make-version b/tfjs-node/scripts/make-version
index c3379eb910f..b062e0c79fa 100755
--- a/tfjs-node/scripts/make-version
+++ b/tfjs-node/scripts/make-version
@@ -33,5 +33,5 @@ fs.writeFile('src/version.ts', versionCode, err => {
if (err) {
throw new Error(`Could not save version file ${version}: ${err}`);
}
- console.log(`Version file for version ${version} saved sucessfully.`);
+ console.log(`Version file for version ${version} saved successfully.`);
});
diff --git a/tfjs-node/src/callbacks.ts b/tfjs-node/src/callbacks.ts
index eabd3a38205..c58da7932d5 100644
--- a/tfjs-node/src/callbacks.ts
+++ b/tfjs-node/src/callbacks.ts
@@ -46,7 +46,7 @@ export class ProgbarLogger extends CustomCallback {
private readonly RENDER_THROTTLE_MS = 50;
/**
- * Construtor of LoggingCallback.
+ * Constructor of LoggingCallback.
*/
constructor() {
super({
@@ -150,7 +150,7 @@ const BASE_NUM_DIGITS = 2;
const MAX_NUM_DECIMAL_PLACES = 4;
/**
- * Get a succint string representation of a number.
+ * Get a succinct string representation of a number.
*
* Uses decimal notation if the number isn't too small.
* Otherwise, use engineering notation.
diff --git a/tfjs-node/src/io/file_system_test.ts b/tfjs-node/src/io/file_system_test.ts
index 13627f55da5..dda594d25b7 100644
--- a/tfjs-node/src/io/file_system_test.ts
+++ b/tfjs-node/src/io/file_system_test.ts
@@ -445,7 +445,7 @@ describe('File system IOHandler', () => {
const history2 =
await model2.fit(xs, ys, {epochs: 2, shuffle: false, verbose: 0});
// The final loss value from training the model twice, 2 epochs
- // at a time, should be equal to the final loss of trainig the
+ // at a time, should be equal to the final loss of training the
// model only once with 4 epochs.
expect(history2.history.loss[1]).toBeCloseTo(18.603);
});
From 8d594e32cad5c2d61f94266328d626ecb446b395 Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Fri, 11 Oct 2024 04:54:41 +0530
Subject: [PATCH 20/33] Fix typos in the documentation strings of the tfjs-core
directory (#8413)
---
tfjs-core/scripts/cloud_funcs/README.md | 4 ++--
tfjs-core/src/backends/complex_util.ts | 2 +-
tfjs-core/src/backends/einsum_util.ts | 2 +-
tfjs-core/src/backends/non_max_suppression_impl.ts | 2 +-
tfjs-core/src/engine.ts | 2 +-
tfjs-core/src/engine_test.ts | 2 +-
tfjs-core/src/io/browser_files_test.ts | 2 +-
tfjs-core/src/io/composite_array_buffer.ts | 2 +-
tfjs-core/src/io/http.ts | 2 +-
tfjs-core/src/io/indexed_db.ts | 2 +-
tfjs-core/src/jasmine_util.ts | 4 ++--
tfjs-core/src/ops/depthwise_conv2d_test.ts | 2 +-
tfjs-core/src/ops/ragged_tensor_to_tensor.ts | 2 +-
tfjs-core/src/serialization.ts | 2 +-
tfjs-core/src/tensor.ts | 2 +-
15 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/tfjs-core/scripts/cloud_funcs/README.md b/tfjs-core/scripts/cloud_funcs/README.md
index 2d56e9e01ec..7f4977926bf 100644
--- a/tfjs-core/scripts/cloud_funcs/README.md
+++ b/tfjs-core/scripts/cloud_funcs/README.md
@@ -1,7 +1,7 @@
This directory contains the following Google Cloud Functions.
### `trigger_nightly`
-Programatically triggers a Cloud Build on master. This function is called by the Cloud Scheduler at 3am EST every day (configurable via the Cloud Scheduler UI).
+Programmatically triggers a Cloud Build on master. This function is called by the Cloud Scheduler at 3am EST every day (configurable via the Cloud Scheduler UI).
You can also trigger the function manually via the Cloud UI.
Command to re-deploy:
@@ -45,6 +45,6 @@ gcloud functions deploy sync_reactnative \
The pipeline looks like this:
1) At 3am, Cloud Scheduler writes to `nightly` topic
-2) That triggers the `nightly` function, which starts a build programatically
+2) That triggers the `nightly` function, which starts a build programmatically
3) That build runs and writes its status to `cloud-builds` topic
4) That triggers the `send_email` function, which sends email and chat with the build status.
diff --git a/tfjs-core/src/backends/complex_util.ts b/tfjs-core/src/backends/complex_util.ts
index 28bfdd05260..59e78069f3b 100644
--- a/tfjs-core/src/backends/complex_util.ts
+++ b/tfjs-core/src/backends/complex_util.ts
@@ -87,7 +87,7 @@ export function complexWithEvenIndex(complex: Float32Array):
}
/**
- * Extracts odd indexed comple values in the given array.
+ * Extracts odd indexed complex values in the given array.
* @param complex The complex tensor values
*/
export function complexWithOddIndex(complex: Float32Array):
diff --git a/tfjs-core/src/backends/einsum_util.ts b/tfjs-core/src/backends/einsum_util.ts
index aef34779f71..ff96aa278a7 100644
--- a/tfjs-core/src/backends/einsum_util.ts
+++ b/tfjs-core/src/backends/einsum_util.ts
@@ -167,7 +167,7 @@ export function checkEinsumDimSizes(
*
* @param summedDims indices to the dimensions being summed over.
* @param idDims A look up table for the dimensions present in each input
- * tensor. Each consituent array contains indices for the dimensions in the
+ * tensor. Each constituent array contains indices for the dimensions in the
* corresponding input tensor.
*
* @return A map with two fields:
diff --git a/tfjs-core/src/backends/non_max_suppression_impl.ts b/tfjs-core/src/backends/non_max_suppression_impl.ts
index 51b45a57799..9b8683c6943 100644
--- a/tfjs-core/src/backends/non_max_suppression_impl.ts
+++ b/tfjs-core/src/backends/non_max_suppression_impl.ts
@@ -188,7 +188,7 @@ function intersectionOverUnion(boxes: TypedArray, i: number, j: number) {
// A Gaussian penalty function, this method always returns values in [0, 1].
// The weight is a function of similarity, the more overlap two boxes are, the
-// smaller the weight is, meaning highly overlapping boxe will be significantly
+// smaller the weight is, meaning highly overlapping boxes will be significantly
// penalized. On the other hand, a non-overlapping box will not be penalized.
function suppressWeight(iouThreshold: number, scale: number, iou: number) {
const weight = Math.exp(scale * iou * iou);
diff --git a/tfjs-core/src/engine.ts b/tfjs-core/src/engine.ts
index a29b04c24aa..0fec3e3b1f2 100644
--- a/tfjs-core/src/engine.ts
+++ b/tfjs-core/src/engine.ts
@@ -313,7 +313,7 @@ export class Engine implements TensorTracker, DataMover {
/**
* Initializes a backend by looking up the backend name in the factory
* registry and calling the factory method. Returns a boolean representing
- * whether the initialization of the backend suceeded. Throws an error if
+ * whether the initialization of the backend succeeded. Throws an error if
* there is no backend in the factory registry.
*/
private initializeBackend(backendName: string):
diff --git a/tfjs-core/src/engine_test.ts b/tfjs-core/src/engine_test.ts
index 97b7e787114..6389c604b2b 100644
--- a/tfjs-core/src/engine_test.ts
+++ b/tfjs-core/src/engine_test.ts
@@ -243,7 +243,7 @@ describe('Backend registration', () => {
throw new Error('failed to create async2');
}, 101 /* priority */);
- // Await for the library to find the best backend that succesfully
+ // Wait for the library to find the best backend that successfully
// initializes.
await tf.ready();
expect(tf.backend()).toEqual(testBackend);
diff --git a/tfjs-core/src/io/browser_files_test.ts b/tfjs-core/src/io/browser_files_test.ts
index 349cc1f172c..f48d15ebdeb 100644
--- a/tfjs-core/src/io/browser_files_test.ts
+++ b/tfjs-core/src/io/browser_files_test.ts
@@ -259,7 +259,7 @@ describeWithFlags('browserDownloads', BROWSER_ENVS, () => {
// Verify that the default file names are used.
expect(jsonAnchor.download).toEqual('model.json');
expect(jsonAnchor.clicked).toEqual(1);
- // The weight file should not have been downoaded.
+ // The weight file should not have been downloaded.
expect(weightDataAnchor.download).toEqual(undefined);
expect(weightDataAnchor.clicked).toEqual(0);
diff --git a/tfjs-core/src/io/composite_array_buffer.ts b/tfjs-core/src/io/composite_array_buffer.ts
index 411fb074083..76015b9abdb 100644
--- a/tfjs-core/src/io/composite_array_buffer.ts
+++ b/tfjs-core/src/io/composite_array_buffer.ts
@@ -91,7 +91,7 @@ export class CompositeArrayBuffer {
start = end;
}
- // Set the byteLenghth
+ // Set the byteLength
if (this.shards.length === 0) {
this.byteLength = 0;
}
diff --git a/tfjs-core/src/io/http.ts b/tfjs-core/src/io/http.ts
index a8ba2da62ca..e5d1020a0ca 100644
--- a/tfjs-core/src/io/http.ts
+++ b/tfjs-core/src/io/http.ts
@@ -324,7 +324,7 @@ IORouterRegistry.registerLoadRouter(httpRouter);
* The following GitHub Gist
* https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864
* implements a server based on [flask](https://github.com/pallets/flask) that
- * can receive the request. Upon receiving the model artifacts via the requst,
+ * can receive the request. Upon receiving the model artifacts via the request,
* this particular server reconstitutes instances of [Keras
* Models](https://keras.io/models/model/) in memory.
*
diff --git a/tfjs-core/src/io/indexed_db.ts b/tfjs-core/src/io/indexed_db.ts
index 42de509413d..2eb479b192f 100644
--- a/tfjs-core/src/io/indexed_db.ts
+++ b/tfjs-core/src/io/indexed_db.ts
@@ -255,7 +255,7 @@ IORouterRegistry.registerLoadRouter(indexedDBRouter);
*
* @param modelPath A unique identifier for the model to be saved. Must be a
* non-empty string.
- * @returns An instance of `BrowserIndexedDB` (sublcass of `IOHandler`),
+ * @returns An instance of `BrowserIndexedDB` (subclass of `IOHandler`),
* which can be used with, e.g., `tf.Model.save`.
*/
export function browserIndexedDB(modelPath: string): IOHandler {
diff --git a/tfjs-core/src/jasmine_util.ts b/tfjs-core/src/jasmine_util.ts
index 19f554d18f6..bf5d28e1b9c 100644
--- a/tfjs-core/src/jasmine_util.ts
+++ b/tfjs-core/src/jasmine_util.ts
@@ -105,7 +105,7 @@ export interface TestFilter {
* Tests that have the substrings specified by the include or startsWith
* will be included in the test run, unless one of the substrings specified
* by `excludes` appears in the name.
- * @param customInclude Function to programatically include a test.
+ * @param customInclude Function to programmatically include a test.
* If this function returns true, a test will immediately run. Otherwise,
* `testFilters` is used for fine-grained filtering.
*
@@ -124,7 +124,7 @@ export function setupTestFilters(
* Filter method that returns boolean, if a given test should run or be
* ignored based on its name. The exclude list has priority over the
* include list. Thus, if a test matches both the exclude and the include
- * list, it will be exluded.
+ * list, it will be excluded.
*/
// tslint:disable-next-line: no-any
const specFilter = (spec: any) => {
diff --git a/tfjs-core/src/ops/depthwise_conv2d_test.ts b/tfjs-core/src/ops/depthwise_conv2d_test.ts
index e6ccc7b0e2a..83e1b34f775 100644
--- a/tfjs-core/src/ops/depthwise_conv2d_test.ts
+++ b/tfjs-core/src/ops/depthwise_conv2d_test.ts
@@ -1214,7 +1214,7 @@ describeWithFlags('depthwiseConv2d gradients', ALL_ENVS, () => {
[[[1, 1], [1, 1], [0, 0]], [[0, 1], [1, 1], [1, 1]]],
[[[1, 0], [1, 1], [0, 0]], [[0, 1], [1, 0], [0, 0]]]
]);
- // result of convolution operatoin
+ // result of convolution operation
result = tf.tensor4d([
[
[[2, 8, 8, 7, 2, 2], [6, 3, 1, 1, 0, 0]],
diff --git a/tfjs-core/src/ops/ragged_tensor_to_tensor.ts b/tfjs-core/src/ops/ragged_tensor_to_tensor.ts
index 2485a16a335..e3c85df50e4 100644
--- a/tfjs-core/src/ops/ragged_tensor_to_tensor.ts
+++ b/tfjs-core/src/ops/ragged_tensor_to_tensor.ts
@@ -70,7 +70,7 @@ import {op} from './operation';
* "ROW_SPLITS": the row_splits tensor from the ragged tensor.
* "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
* "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then
- * it is preceeded by "FIRST_DIM_SIZE". The tensors are in the order of
+ * it is preceded by "FIRST_DIM_SIZE". The tensors are in the order of
* the dimensions.
* @return A Tensor. Has the same type as values.
* @doc {heading: 'Operations', subheading: 'Ragged'}
diff --git a/tfjs-core/src/serialization.ts b/tfjs-core/src/serialization.ts
index 621a9a63962..14d452b39e9 100644
--- a/tfjs-core/src/serialization.ts
+++ b/tfjs-core/src/serialization.ts
@@ -225,7 +225,7 @@ export class SerializationMap {
*
* @param cls The class to be registered. It must have a public static member
* called `className` defined and the value must be a non-empty string.
- * @param pkg The pakcage name that this class belongs to. This used to define
+ * @param pkg The package name that this class belongs to. This is used to define
* the key in GlobalCustomObject. If not defined, it defaults to `Custom`.
* @param name The name that user specified. It defaults to the actual name of
* the class as specified by its static `className` property.
diff --git a/tfjs-core/src/tensor.ts b/tfjs-core/src/tensor.ts
index 137d3b48705..9b37029807f 100644
--- a/tfjs-core/src/tensor.ts
+++ b/tfjs-core/src/tensor.ts
@@ -518,7 +518,7 @@ Object.defineProperty(Tensor, Symbol.hasInstance, {
export function getGlobalTensorClass() {
// Use getGlobal so that we can augment the Tensor class across package
- // boundaries becase the node resolution alg may result in different modules
+ // boundaries because the node resolution alg may result in different modules
// being returned for this file depending on the path they are loaded from.
return getGlobal('Tensor', () => {
return Tensor;
From 33d939a55c22f7fe1eef3183f01d2a23d7e780da Mon Sep 17 00:00:00 2001
From: mattvr <4052466+mattvr@users.noreply.github.com>
Date: Fri, 11 Oct 2024 12:50:26 -0700
Subject: [PATCH 21/33] webgpu: fix: conditionally call deprecated
GPUAdapter.requestAdapterInfo (#8392)
* fix: conditionally call deprecated GPUAdapter.requestAdapterInfo
* Update tfjs-backend-webgpu/src/base.ts
Co-authored-by: Kenta Moriuchi
* fix: update webgpu types
---------
Co-authored-by: Kenta Moriuchi
---
package.json | 2 +-
tfjs-backend-webgpu/src/base.ts | 8 +++++++-
yarn.lock | 8 ++++----
3 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/package.json b/package.json
index 16b119a9cfb..2588de32671 100644
--- a/package.json
+++ b/package.json
@@ -29,7 +29,7 @@
"@types/semver": "^7.3.9",
"@types/shelljs": "^0.8.7",
"@types/dom-webcodecs": "0.1.4",
- "@webgpu/types": "0.1.38",
+ "@webgpu/types": "0.1.48",
"ajv": "~6.12.3",
"argparse": "^1.0.10",
"chalk": "~2.4.2",
diff --git a/tfjs-backend-webgpu/src/base.ts b/tfjs-backend-webgpu/src/base.ts
index 1de43149b07..cf3ac665e24 100644
--- a/tfjs-backend-webgpu/src/base.ts
+++ b/tfjs-backend-webgpu/src/base.ts
@@ -57,7 +57,13 @@ if (isWebGPUSupported()) {
};
const device: GPUDevice = await adapter.requestDevice(deviceDescriptor);
- const adapterInfo = await adapter.requestAdapterInfo();
+ const adapterInfo =
+ 'info' in adapter
+ ? adapter.info
+ : 'requestAdapterInfo' in adapter
+ // tslint:disable-next-line:no-any
+ ? await (adapter as any).requestAdapterInfo()
+ : undefined;
return new WebGPUBackend(device, adapterInfo);
}, 3 /*priority*/);
}
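The detection order above generalizes to any WebGPU consumer: prefer the stable `GPUAdapter.info` attribute, fall back to the deprecated `requestAdapterInfo()`, and tolerate adapters that expose neither. A standalone sketch of the same pattern (assumes `@webgpu/types` is in scope):

```ts
async function getAdapterInfo(adapter: GPUAdapter):
    Promise<GPUAdapterInfo|undefined> {
  if ('info' in adapter) {
    return adapter.info;  // current spec: a synchronous attribute
  }
  if ('requestAdapterInfo' in adapter) {
    // Older Chrome releases only expose the deprecated async method.
    // tslint:disable-next-line:no-any
    return (adapter as any).requestAdapterInfo();
  }
  return undefined;  // no adapter info available at all
}
```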
diff --git a/yarn.lock b/yarn.lock
index 69db9f15c68..1f1f69345a4 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -513,10 +513,10 @@
resolved "https://registry.yarnpkg.com/@verdaccio/ui-theme/-/ui-theme-6.0.0-6-next.23.tgz#268da5091e1e9264fe87b8b94c0ac596e9e54879"
integrity sha512-GXpEPdZJm6o+2VAxzUsKaiDriS+5enqr7Gjrb2Bttcd+IkOuC8lDsoFHxIv0ib4JudZJ/aKsRYL3TN2AetPFjw==
-"@webgpu/types@0.1.38":
- version "0.1.38"
- resolved "https://registry.npmjs.org/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
- integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
+"@webgpu/types@0.1.48":
+ version "0.1.48"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.48.tgz#8ab741852283118bd633345c20e218faa7211e9c"
+ integrity sha512-e3zmDEPih4Rle+JrP5cT8nCCtDizoUpEaN72OuD1clbhXGERtn0wwuMdxOrBymu3kMLWKDd8hd+ERhSheLuLTg==
"@xmldom/xmldom@^0.7.3":
version "0.7.5"
From cb6206c291dcd07428b558bea4ab03cc21f6114b Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Thu, 5 Dec 2024 10:31:40 -0800
Subject: [PATCH 22/33] Update README.md (#8464)
---
tfjs-converter/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tfjs-converter/README.md b/tfjs-converter/README.md
index 3a8821cb978..07587a9867f 100644
--- a/tfjs-converter/README.md
+++ b/tfjs-converter/README.md
@@ -319,7 +319,7 @@ jax_conversion.convert_jax(
```
See
-[here](https://github.com/google/jax/tree/main/jax/experimental/jax2tf#shape-polymorphic-conversion)
+[here](https://github.com/jax-ml/jax/tree/main/jax/experimental/jax2tf#shape-polymorphic-conversion)
for more details on the exact syntax for this argument.
When converting JAX models, you can also pass any [options that
From 2644bd0d6cea677f80e44ed4a44bea5e04aabeb3 Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Wed, 18 Dec 2024 17:57:50 -0800
Subject: [PATCH 23/33] [WebGPU] Make dataToGPU upload to GPU if data is on CPU
(#8483)
---
tfjs-backend-webgpu/src/backend_webgpu.ts | 9 ++++++---
tfjs-backend-webgpu/src/backend_webgpu_test.ts | 12 ++++++++++++
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/tfjs-backend-webgpu/src/backend_webgpu.ts b/tfjs-backend-webgpu/src/backend_webgpu.ts
index ceae66c513a..ebf517fa550 100644
--- a/tfjs-backend-webgpu/src/backend_webgpu.ts
+++ b/tfjs-backend-webgpu/src/backend_webgpu.ts
@@ -594,8 +594,9 @@ export class WebGPUBackend extends KernelBackend {
* @param dataId The source tensor.
*/
override readToGPU(dataId: DataId): GPUData {
- const srcTensorData = this.tensorMap.get(dataId);
- const {values, dtype, shape, resource} = srcTensorData;
+ let srcTensorData = this.tensorMap.get(dataId);
+ const {values, dtype, shape} = srcTensorData;
+ let resource = srcTensorData.resource;
if (dtype === 'complex64') {
throw new Error('Does not support reading buffer for complex64 dtype.');
@@ -603,7 +604,9 @@ export class WebGPUBackend extends KernelBackend {
if (resource == null) {
if (values != null) {
- throw new Error('Data is not on GPU but on CPU.');
+ this.uploadToGPU(dataId);
+ srcTensorData = this.tensorMap.get(dataId);
+ resource = srcTensorData.resource;
} else {
throw new Error('There is no data on GPU or CPU.');
}
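A short usage sketch of the new behavior (WebGPU backend assumed active); previously this sequence threw `'Data is not on GPU but on CPU.'`:

```ts
const t = tf.tensor1d([1, 2, 3, 4, 5]);     // values start out on the CPU
const gpuData = t.dataToGPU();              // now uploads on demand
console.log(gpuData.buffer !== undefined);  // true
gpuData.tensorRef.dispose();                // caller owns the returned ref
```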
diff --git a/tfjs-backend-webgpu/src/backend_webgpu_test.ts b/tfjs-backend-webgpu/src/backend_webgpu_test.ts
index ed8149f409a..5e06905d7c8 100644
--- a/tfjs-backend-webgpu/src/backend_webgpu_test.ts
+++ b/tfjs-backend-webgpu/src/backend_webgpu_test.ts
@@ -200,6 +200,18 @@ describeWebGPU('backend webgpu', () => {
await c3.data();
tf.env().set('WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE', savedFlag);
});
+
+ it('dataToGPU uploads to GPU if the tensor is on CPU', async () => {
+ const webGPUBackend = (tf.backend() as WebGPUBackend);
+ const data = [1,2,3,4,5];
+ const tensor = tf.tensor1d(data);
+ const res = tensor.dataToGPU();
+ expect(res.buffer).toBeDefined();
+ const resData = await webGPUBackend.getBufferData(res.buffer);
+ const values = tf.util.convertBackendValuesAndArrayBuffer(
+ resData, res.tensorRef.dtype);
+ expectArraysEqual(values, data);
+ });
});
describeWebGPU('backendWebGPU', () => {
From 78345c5f33736f5f090fd6719d3b336c85f6ed62 Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Wed, 23 Apr 2025 10:55:01 -0700
Subject: [PATCH 24/33] Move PR Continuous Integration to Github Actions
(#8534)
Move GCP presubmits to GitHub Actions. This allows us to run WebGL / WebGPU tests, since GitHub Actions has a macOS runner with a GPU. This should unblock several PRs.
---
.github/workflows/tfjs-ci.yml | 54 +++++++++++++++++++
BUILD.bazel | 21 ++++++--
package.json | 4 ++
tfjs-backend-webgl/BUILD.bazel | 30 +++++++++++
tfjs-backend-webgpu/BUILD.bazel | 7 +++
tfjs-backend-webgpu/src/setup_test.ts | 13 +++++
tfjs-layers/BUILD.bazel | 10 ++++
.../layers/nlp/multihead_attention_test.ts | 1 +
tools/karma_template.conf.js | 25 +++++++--
9 files changed, 157 insertions(+), 8 deletions(-)
create mode 100644 .github/workflows/tfjs-ci.yml
diff --git a/.github/workflows/tfjs-ci.yml b/.github/workflows/tfjs-ci.yml
new file mode 100644
index 00000000000..9004ae9a8f0
--- /dev/null
+++ b/.github/workflows/tfjs-ci.yml
@@ -0,0 +1,54 @@
+name: TFJS Continuous Integration
+
+on:
+ push:
+ branches: [ $default-branch ]
+ pull_request:
+ branches: [ $default-branch ]
+ workflow_dispatch:
+
+permissions:
+ contents: read
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: bazel-contrib/setup-bazel@0.14.0
+ with:
+ # Avoid downloading Bazel every time.
+ bazelisk-cache: true
+ # Store build cache per workflow.
+ disk-cache: ${{ github.workflow }}-cpu
+ # Share repository cache between workflows.
+ repository-cache: true
+ - uses: actions/checkout@v4
+ - name: Test TFJS CPU
+ uses: actions/setup-node@v4
+ with:
+ node-version: 20.x
+ cache: 'npm'
+ - run: npm i -g yarn
+ - run: yarn install
+ - run: yarn test-cpu
+
+ test-gpu-mac:
+ runs-on: macos-latest-xlarge # consumer gpu
+ steps:
+ - uses: bazel-contrib/setup-bazel@0.14.0
+ with:
+ # Avoid downloading Bazel every time.
+ bazelisk-cache: true
+ # Store build cache per workflow.
+ disk-cache: ${{ github.workflow }}-gpu-mac
+ # Share repository cache between workflows.
+ repository-cache: true
+ - uses: actions/checkout@v4
+ - name: Test TFJS GPU
+ uses: actions/setup-node@v4
+ with:
+ node-version: 20.x
+ cache: 'npm'
+ - run: npm i -g yarn
+ - run: yarn install
+ - run: yarn test-gpu
diff --git a/BUILD.bazel b/BUILD.bazel
index d1c21570ac0..3f7ea2fa80a 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -49,16 +49,31 @@ headless_flag(
)
test_suite(
- name = "tests",
+ name = "tests_cpu",
tests = [
"//tfjs-backend-cpu:tests",
"//tfjs-backend-wasm:tests",
- "//tfjs-backend-webgl:tests",
"//tfjs-converter:tests",
"//tfjs-core:tests",
"//tfjs-data:tests",
- "//tfjs-layers:tests",
"//tfjs-tfdf:tests",
"//tfjs-tflite:tests",
],
)
+
+test_suite(
+ name = "tests_gpu",
+ tests = [
+ "//tfjs-backend-webgl:tests",
+ "//tfjs-backend-webgpu:tests",
+ "//tfjs-layers:tests",
+ ],
+)
+
+test_suite(
+ name = "tests",
+ tests = [
+ ":tests_cpu",
+ ":tests_gpu",
+ ],
+)
diff --git a/package.json b/package.json
index 2588de32671..8ad2b115358 100644
--- a/package.json
+++ b/package.json
@@ -80,6 +80,10 @@
"scripts": {
"lint": "tslint -p tsconfig_tslint.json",
"test": "bazel test //:tests",
+ "test-cpu": "bazel test --test_output=all //:tests_cpu",
+ "test-gpu": "bazel test --test_output=all //:tests_gpu",
+ "test-non-bazel": "cd link-package && yarn build-deps-for --all",
+ "build": "cd link-package && yarn build",
"test-packages-ci": "yarn generate-cloudbuild-for-packages && ./scripts/run-build.sh",
"nightly-cloudbuild": "NIGHTLY=true yarn generate-cloudbuild-for-packages && gcloud builds submit . --config=cloudbuild_generated.yml --substitutions=_NIGHTLY=true",
"generate-cloudbuild-for-packages": "ts-node -s ./scripts/generate_cloudbuild_for_packages.ts",
diff --git a/tfjs-backend-webgl/BUILD.bazel b/tfjs-backend-webgl/BUILD.bazel
index cbe8e8c9d26..ba970c7af63 100644
--- a/tfjs-backend-webgl/BUILD.bazel
+++ b/tfjs-backend-webgl/BUILD.bazel
@@ -116,6 +116,11 @@ tfjs_web_test(
"bs_chrome_mac",
"bs_android_10",
],
+ local_browser = select({
+ "@bazel_tools//src/conditions:linux_x86_64": "chrome_webgpu_linux",
+ "@bazel_tools//src/conditions:windows": "chrome_webgpu",
+ "//conditions:default": "chrome_webgpu",
+ }),
static_files = STATIC_FILES,
)
@@ -137,6 +142,11 @@ tfjs_web_test(
"bs_safari_mac",
"bs_ios_12",
],
+ local_browser = select({
+ "@bazel_tools//src/conditions:linux_x86_64": "chrome_webgpu_linux",
+ "@bazel_tools//src/conditions:windows": "chrome_webgpu",
+ "//conditions:default": "chrome_webgpu",
+ }),
static_files = STATIC_FILES,
)
@@ -156,6 +166,11 @@ tfjs_web_test(
],
headless = False,
presubmit_browsers = [], # Only run in nightly
+ local_browser = select({
+ "@bazel_tools//src/conditions:linux_x86_64": "chrome_webgpu_linux",
+ "@bazel_tools//src/conditions:windows": "chrome_webgpu",
+ "//conditions:default": "chrome_webgpu",
+ }),
static_files = STATIC_FILES,
)
@@ -175,6 +190,11 @@ tfjs_web_test(
],
headless = False,
presubmit_browsers = [], # Only run in nightly
+ local_browser = select({
+ "@bazel_tools//src/conditions:linux_x86_64": "chrome_webgpu_linux",
+ "@bazel_tools//src/conditions:windows": "chrome_webgpu",
+ "//conditions:default": "chrome_webgpu",
+ }),
static_files = STATIC_FILES,
)
@@ -194,6 +214,11 @@ tfjs_web_test(
],
headless = False,
presubmit_browsers = [], # Only run in nightly
+ local_browser = select({
+ "@bazel_tools//src/conditions:linux_x86_64": "chrome_webgpu_linux",
+ "@bazel_tools//src/conditions:windows": "chrome_webgpu",
+ "//conditions:default": "chrome_webgpu",
+ }),
static_files = STATIC_FILES,
)
@@ -213,6 +238,11 @@ tfjs_web_test(
],
headless = False,
presubmit_browsers = [], # Only run in nightly
+ local_browser = select({
+ "@bazel_tools//src/conditions:linux_x86_64": "chrome_webgpu_linux",
+ "@bazel_tools//src/conditions:windows": "chrome_webgpu",
+ "//conditions:default": "chrome_webgpu",
+ }),
static_files = STATIC_FILES,
)
diff --git a/tfjs-backend-webgpu/BUILD.bazel b/tfjs-backend-webgpu/BUILD.bazel
index b38b970e8e2..dac1208c937 100644
--- a/tfjs-backend-webgpu/BUILD.bazel
+++ b/tfjs-backend-webgpu/BUILD.bazel
@@ -116,3 +116,10 @@ tfjs_web_test(
}),
static_files = STATIC_FILES,
)
+
+test_suite(
+ name = "tests",
+ tests = [
+ ":tfjs-backend-webgpu_test",
+ ],
+)
diff --git a/tfjs-backend-webgpu/src/setup_test.ts b/tfjs-backend-webgpu/src/setup_test.ts
index f881a35fbae..71e13b5491c 100644
--- a/tfjs-backend-webgpu/src/setup_test.ts
+++ b/tfjs-backend-webgpu/src/setup_test.ts
@@ -33,6 +33,12 @@ const TEST_FILTERS: TestFilter[] = [
'gradient', // gradient function not found.
]
},
+ {
+ startsWith: 'pow',
+ excludes: [
+ 'int32' // MacOS precision issue
+ ],
+ },
{
startsWith: 'exp ',
excludes: [
@@ -62,6 +68,13 @@ const TEST_FILTERS: TestFilter[] = [
excludes: [
'gradients', // Failing on MacOS
'gradient with clones', // Failing on MacOS
+ 'propagates NaNs', // Failing on MacOS
+ ],
+ },
+ {
+ startsWith: 'sin ',
+ excludes: [
+ 'propagates NaNs', // Failing on MacOS
],
},
{
diff --git a/tfjs-layers/BUILD.bazel b/tfjs-layers/BUILD.bazel
index 88f1358c418..76223bdafad 100644
--- a/tfjs-layers/BUILD.bazel
+++ b/tfjs-layers/BUILD.bazel
@@ -59,6 +59,11 @@ tfjs_web_test(
],
headless = False,
seed = "12345",
+ local_browser = select({
+ "@bazel_tools//src/conditions:linux_x86_64": "chrome_webgpu_linux",
+ "@bazel_tools//src/conditions:windows": "chrome_webgpu",
+ "//conditions:default": "chrome_webgpu",
+ }),
static_files = [
# Listed here so sourcemaps are served
"//tfjs-layers/src:tfjs-layers_test_bundle",
@@ -79,6 +84,11 @@ tfjs_web_test(
],
headless = False,
seed = "12345",
+ local_browser = select({
+ "@bazel_tools//src/conditions:linux_x86_64": "chrome_webgpu_linux",
+ "@bazel_tools//src/conditions:windows": "chrome_webgpu",
+ "//conditions:default": "chrome_webgpu",
+ }),
static_files = [
# Listed here so sourcemaps are served
"//tfjs-layers/src:tfjs-layers_test_bundle",
diff --git a/tfjs-layers/src/layers/nlp/multihead_attention_test.ts b/tfjs-layers/src/layers/nlp/multihead_attention_test.ts
index 1cc5d77f0e5..bdf0cbcee3a 100644
--- a/tfjs-layers/src/layers/nlp/multihead_attention_test.ts
+++ b/tfjs-layers/src/layers/nlp/multihead_attention_test.ts
@@ -117,6 +117,7 @@ describeMathCPUAndGPU('MultiHeadAttention', () => {
*/
function testMaskedAttention({testcaseName, useBias}: MaskedAttentionArgs) {
it(`${testcaseName}`, () => {
+ pending('Temporarily disabled due to failing on Mac');
const testLayer = new MultiHeadAttention({
numHeads: 2,
keyDim: 2,
diff --git a/tools/karma_template.conf.js b/tools/karma_template.conf.js
index 1e90d9bcba4..2e9b1af0ff7 100644
--- a/tools/karma_template.conf.js
+++ b/tools/karma_template.conf.js
@@ -37,6 +37,7 @@ const CUSTOM_LAUNCHERS = {
os: 'OS X',
os_version: 'High Sierra',
flags: [
+ '--use-mock-keychain',
// For tfjs-data
'--autoplay-policy=no-user-gesture-required',
],
@@ -96,6 +97,7 @@ const CUSTOM_LAUNCHERS = {
flags: [
'--enable-unsafe-webgpu', // Can be removed after WebGPU release
'--use-webgpu-adapter=swiftshader',
+ '--use-mock-keychain',
// https://github.com/tensorflow/tfjs/issues/7631
'--disable-vulkan-fallback-to-gl-for-testing',
@@ -103,37 +105,50 @@ const CUSTOM_LAUNCHERS = {
},
chrome_with_swift_shader: {
base: CHROME,
- flags: ['--blacklist-accelerated-compositing', '--blacklist-webgl']
+ flags: [
+ '--blacklist-accelerated-compositing',
+ '--blacklist-webgl',
+ '--use-mock-keychain',
+ ]
},
chrome_autoplay: {
base: CHROME,
flags: [
'--autoplay-policy=no-user-gesture-required',
'--no-sandbox',
+ '--use-mock-keychain',
],
},
chrome_webgpu_linux: {
- base: 'ChromeCanary',
+ base: 'ChromeHeadless',
flags: [
'--enable-features=Vulkan',
'--enable-unsafe-webgpu',
'--disable-dawn-features=disallow_unsafe_apis',
+ '--use-mock-keychain',
]
},
chrome_webgpu: {
- base: 'ChromeCanary',
+ base: 'Chrome',
flags: [
'--disable-dawn-features=disallow_unsafe_apis',
'--no-sandbox',
+ '--use-mock-keychain',
]
},
chrome_debugging: {
base: 'Chrome',
- flags: ['--remote-debugging-port=9333'],
+ flags: [
+ '--remote-debugging-port=9333',
+ '--use-mock-keychain',
+ ],
},
chrome_no_sandbox: {
base: CHROME,
- flags: ['--no-sandbox'],
+ flags: [
+ '--no-sandbox',
+ '--use-mock-keychain',
+ ],
}
};
From 407c6e56b9c794be0051cdcf83dd0dfafdb09027 Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Wed, 23 Apr 2025 11:04:02 -0700
Subject: [PATCH 25/33] Fix wrong branch name in CI workflow
---
.github/workflows/tfjs-ci.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/tfjs-ci.yml b/.github/workflows/tfjs-ci.yml
index 9004ae9a8f0..435b9313755 100644
--- a/.github/workflows/tfjs-ci.yml
+++ b/.github/workflows/tfjs-ci.yml
@@ -2,9 +2,9 @@ name: TFJS Continuous Integration
on:
push:
- branches: [ $default-branch ]
+ branches: [ "master" ]
pull_request:
- branches: [ $default-branch ]
+ branches: [ "master" ]
workflow_dispatch:
permissions:
From f2e55729ba2a032855de52d81883eb3460d71d1d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Val=C3=A9rian=20Rousset?=
Date: Wed, 23 Apr 2025 20:30:04 +0200
Subject: [PATCH 26/33] [tfjs-node] replace deprecated utils (#8425)
Co-authored-by: Matthew Soulanille
---
tfjs-node/src/kernels/TopK.ts | 5 ++---
tfjs-node/src/nodejs_kernel_backend.ts | 9 ++++-----
2 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/tfjs-node/src/kernels/TopK.ts b/tfjs-node/src/kernels/TopK.ts
index 9b581ffbd40..9f77169e1f0 100644
--- a/tfjs-node/src/kernels/TopK.ts
+++ b/tfjs-node/src/kernels/TopK.ts
@@ -16,7 +16,6 @@
*/
import {KernelConfig, scalar, TopK, TopKAttrs, TopKInputs} from '@tensorflow/tfjs';
-import {isNullOrUndefined} from 'util';
import {createTensorsTypeOpAttr, NodeJSKernelBackend} from '../nodejs_kernel_backend';
@@ -28,8 +27,8 @@ export const topKConfig: KernelConfig = {
const backend = args.backend as NodeJSKernelBackend;
const {k, sorted} = args.attrs as unknown as TopKAttrs;
- const kCount = isNullOrUndefined(k) ? 1 : k;
- const isSorted = isNullOrUndefined(sorted) ? true : sorted;
+ const kCount = k ?? 1;
+ const isSorted = sorted ?? true;
const opAttrs = [
{name: 'sorted', type: backend.binding.TF_ATTR_BOOL, value: isSorted},
createTensorsTypeOpAttr('T', x.dtype),
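The nullish-coalescing operator matches the removed helper's semantics exactly: `??` only substitutes for `null` and `undefined`, so defined-but-falsy values such as `0` and `false` pass through, unlike `||`. A quick sketch:

```ts
const k: number|undefined = undefined;
const kCount = k ?? 1;                 // 1: undefined is replaced
const sorted: boolean|null = false;
const isSorted = sorted ?? true;       // false: ?? keeps defined falsy values
const isSortedWrong = sorted || true;  // true: || would discard `false`
console.log(kCount, isSorted, isSortedWrong);
```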
diff --git a/tfjs-node/src/nodejs_kernel_backend.ts b/tfjs-node/src/nodejs_kernel_backend.ts
index a399d31bfa0..a192f313854 100644
--- a/tfjs-node/src/nodejs_kernel_backend.ts
+++ b/tfjs-node/src/nodejs_kernel_backend.ts
@@ -17,7 +17,6 @@
import * as tf from '@tensorflow/tfjs';
import {backend_util, BackendTimingInfo, DataId, DataType, KernelBackend, ModelTensorInfo, Rank, Scalar, scalar, ScalarLike, Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, TensorInfo, tidy, util} from '@tensorflow/tfjs';
-import {isArray, isNullOrUndefined} from 'util';
import {encodeInt32ArrayAsInt64, Int64Scalar} from './int64_tensors';
import {TensorMetadata, TFEOpAttr, TFJSBinding} from './tfjs_binding';
@@ -740,7 +739,7 @@ export function getTFDType(dataType: tf.DataType): number {
export function createTensorsTypeOpAttr(
attrName: string,
tensorsOrDtype: tf.Tensor|tf.Tensor[]|tf.DataType): TFEOpAttr {
- if (isNullOrUndefined(tensorsOrDtype)) {
+ if (tensorsOrDtype === null || tensorsOrDtype === undefined) {
throw new Error('Invalid input tensors value.');
}
return {
@@ -757,7 +756,7 @@ export function createTensorsTypeOpAttr(
export function createOpAttr(
attrName: string, tensorsOrDtype: tf.Tensor|tf.Tensor[]|tf.DataType,
value: ScalarLike): TFEOpAttr {
- if (isNullOrUndefined(tensorsOrDtype)) {
+ if (tensorsOrDtype === null || tensorsOrDtype === undefined) {
throw new Error('Invalid input tensors value.');
}
return {name: attrName, type: nodeBackend().binding.TF_BOOL, value};
@@ -765,10 +764,10 @@ export function createOpAttr(
/** Returns the dtype number for a single or list of input Tensors. */
function getTFDTypeForInputs(tensors: tf.Tensor|tf.Tensor[]): number {
- if (isNullOrUndefined(tensors)) {
+ if (tensors === null || tensors === undefined) {
throw new Error('Invalid input tensors value.');
}
- if (isArray(tensors)) {
+ if (Array.isArray(tensors)) {
for (let i = 0; i < tensors.length; i++) {
return getTFDType(tensors[i].dtype);
}
From 29c5db378f0ae2265a9d8dfba6ffbef58d2abfab Mon Sep 17 00:00:00 2001
From: Shivam Mishra <124146945+shmishra99@users.noreply.github.com>
Date: Thu, 24 Apr 2025 00:05:13 +0530
Subject: [PATCH 27/33] Remove --no-site-packages flag from virtualenv command
and update the supported python version. (#8551)
* Remove --no-site-packages flag from virtualenv.
* Change Python version to the lowest supported version.
---------
Co-authored-by: Matthew Soulanille
---
tfjs-converter/README.md | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/tfjs-converter/README.md b/tfjs-converter/README.md
index 07587a9867f..8aee3079133 100644
--- a/tfjs-converter/README.md
+++ b/tfjs-converter/README.md
@@ -26,14 +26,14 @@ __0. Please make sure that you run in a Docker container or a virtual environmen
__Note__: *Check that [`tf-nightly-cpu-2.0-preview`](https://pypi.org/project/tf-nightly-cpu-2.0-preview/#files) is available for your platform.*
-Most of the times, this means that you have to use Python 3.6.8 in your local
-environment. To force Python 3.6.8 in your local project, you can install
+Most of the time, this means that you have to use Python 3.7.10 in your local
+environment. To force Python 3.7.10 in your local project, you can install
[`pyenv`](https://github.com/pyenv/pyenv) and proceed as follows in the target
directory:
```bash
-pyenv install 3.6.8
-pyenv local 3.6.8
+pyenv install 3.7.10
+pyenv local 3.7.10
```
Now, you can
@@ -41,7 +41,7 @@ Now, you can
a `venv` virtual environment in your current folder:
```bash
-virtualenv --no-site-packages venv
+virtualenv venv
. venv/bin/activate
```
From 8c087a4cb3a7030fef12e7f1661862fb78aef863 Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Wed, 23 Apr 2025 11:44:17 -0700
Subject: [PATCH 28/33] [WebGPU] Access properties with `.prop` instead of
`['prop']` (#8503)
Fixes a bug with closure compiler property renaming.
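For context, Closure Compiler's advanced optimizations rename properties that are accessed with dot notation but leave string-keyed accesses untouched, so mixing `program.dispatch` and `program['dispatch']` on the same object breaks once the dot-accessed name is renamed. A hypothetical illustration (not from the patch):

```ts
// Hypothetical illustration: Closure's ADVANCED mode may rename
// dot-accessed properties but never string keys.
interface WebGPUProgramLike {
  dispatch: [number, number, number];
}

const program: WebGPUProgramLike = {dispatch: [1, 1, 1]};

// If one site writes program.dispatch and another program['dispatch'],
// the compiler can emit something like:
//   program.a           // dot access renamed: dispatch -> a
//   program['dispatch'] // string key left alone -> now undefined
// Accessing the property the same way everywhere lets all sites rename
// together, which is what this patch enforces.
const dispatch = program.dispatch;
console.log(dispatch);
```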
---
tfjs-backend-webgpu/src/backend_webgpu.ts | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tfjs-backend-webgpu/src/backend_webgpu.ts b/tfjs-backend-webgpu/src/backend_webgpu.ts
index ebf517fa550..6afb6824edd 100644
--- a/tfjs-backend-webgpu/src/backend_webgpu.ts
+++ b/tfjs-backend-webgpu/src/backend_webgpu.ts
@@ -73,8 +73,8 @@ const reshapeDispatch =
program: webgpu_program.WebGPUProgram): [number, number, number] => {
const MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE =
device.limits.maxComputeWorkgroupsPerDimension;
- const layout = program['dispatchLayout'];
- const dispatch = program['dispatch'];
+ const layout = program.dispatchLayout;
+ const dispatch = program.dispatch;
if (dispatch.every((d) => d <= MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE)) {
return dispatch;
}
@@ -694,8 +694,8 @@ export class WebGPUBackend extends KernelBackend {
};
const kernelMs = await Promise.all(flattenedActiveTimerQueries);
- res['kernelMs'] = util.sum(kernelMs);
- res['getExtraProfileInfo'] = () =>
+ res.kernelMs = util.sum(kernelMs);
+ res.getExtraProfileInfo = () =>
kernelMs.map((d, i) => ({name: flattenedActiveTimerNames[i], ms: d}))
.map(d => `${d.name}: ${d.ms}`)
.join(', ');
From a83539db79fc6900ef7c5af829ac0c5dbb9ca80b Mon Sep 17 00:00:00 2001
From: croraf
Date: Wed, 23 Apr 2025 20:54:17 +0200
Subject: [PATCH 29/33] Fix description for util_base.ts `assert` function
(#8270)
Co-authored-by: Ping Yu <4018+pyu10055@users.noreply.github.com>
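For context, `tf.util.assert` takes its message as a `() => string` thunk rather than a plain string, so the message is only built when the assertion actually fails; the old doc example passed a bare string. A small sketch of the corrected usage (assuming the public `@tensorflow/tfjs-core` API):

```ts
import * as tf from '@tensorflow/tfjs-core';

const x = 2;
// The second argument is a () => string, evaluated lazily: the message
// string is only constructed if the assertion fails.
tf.util.assert(x === 2, () => `x is not 2, got ${x}`);
```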
---
tfjs-core/src/util_base.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tfjs-core/src/util_base.ts b/tfjs-core/src/util_base.ts
index cb7498b21df..45033f75e97 100644
--- a/tfjs-core/src/util_base.ts
+++ b/tfjs-core/src/util_base.ts
@@ -139,7 +139,7 @@ export function distSquared(a: FlatVector, b: FlatVector): number {
*
* ```js
* const x = 2;
- * tf.util.assert(x === 2, 'x is not 2');
+ * tf.util.assert(x === 2, () => 'x is not 2');
* ```
*
* @param expr The expression to assert (as a boolean).
From dc7261728faffaf48c2c5dd3fafed2b9361b8932 Mon Sep 17 00:00:00 2001
From: Valérian Rousset
Date: Wed, 23 Apr 2025 20:59:09 +0200
Subject: [PATCH 30/33] [tfjs-core] do not hang on invalid browser files
(#8517)
`tf.io.browserFiles` doesn't fail when loading invalid files. Or rather, it fails, but it never rejects the promise, so `IOHandler.load` hangs forever.
Wrapping the `JSON.parse` call in a try/catch and rejecting accordingly does the trick; a small test exercises it.
Note: `FileReader.readAsText()` is a callback-based way of reading files. The promise-based `Blob.text()` would make `BrowserFiles.load` simpler and safer (but felt out-of-scope here).
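To make the failure mode concrete: if `JSON.parse` throws inside the `FileReader.onload` callback, neither `resolve` nor `reject` ever runs and the wrapped promise hangs. A minimal sketch of the pattern (hypothetical `readJson` helper, not the patch itself):

```ts
// Hypothetical helper: reject instead of hanging when parsing fails.
function readJson(file: File): Promise<unknown> {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = (event: Event) => {
      try {
        // A throw here without the try/catch would leave the promise
        // pending forever, since reject() is never called.
        resolve(JSON.parse((event.target as FileReader).result as string));
      } catch (e) {
        reject(new Error(
            `Failed to parse file ${file.name}: ${(e as Error).message}`));
      }
    };
    reader.onerror = () => reject(new Error(`Failed to read ${file.name}`));
    reader.readAsText(file);
  });
}
```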
---
tfjs-core/src/io/browser_files.ts | 11 +++++++++--
tfjs-core/src/io/browser_files_test.ts | 8 ++++++++
2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/tfjs-core/src/io/browser_files.ts b/tfjs-core/src/io/browser_files.ts
index 816e00a0820..7079ab3f55f 100644
--- a/tfjs-core/src/io/browser_files.ts
+++ b/tfjs-core/src/io/browser_files.ts
@@ -139,8 +139,15 @@ class BrowserFiles implements IOHandler {
return new Promise((resolve, reject) => {
const jsonReader = new FileReader();
jsonReader.onload = (event: Event) => {
- // tslint:disable-next-line:no-any
- const modelJSON = JSON.parse((event.target as any).result) as ModelJSON;
+ let modelJSON: ModelJSON;
+ try {
+ // tslint:disable-next-line:no-any
+ modelJSON = JSON.parse((event.target as any).result);
+      } catch (e) {
+        reject(new Error(`Failed to parse file ${
+            this.jsonFile.name}: ${(e as Error).message}`));
+ return;
+ }
const modelTopology = modelJSON.modelTopology;
if (modelTopology == null) {
diff --git a/tfjs-core/src/io/browser_files_test.ts b/tfjs-core/src/io/browser_files_test.ts
index f48d15ebdeb..1a480cad627 100644
--- a/tfjs-core/src/io/browser_files_test.ts
+++ b/tfjs-core/src/io/browser_files_test.ts
@@ -677,4 +677,12 @@ describeWithFlags('browserFiles', BROWSER_ENVS, () => {
expect(() => tf.io.browserFiles(null)).toThrowError(/at least 1 file/);
expect(() => tf.io.browserFiles([])).toThrowError(/at least 1 file/);
});
+
+ it('Invalid JSON leads to Error', async () => {
+ const file = new File(['invalid'], 'model.json', {
+ type: 'application/json',
+ });
+ const filesHandler = tf.io.browserFiles([file]);
+ await expectAsync(filesHandler.load()).toBeRejectedWithError(/parse file/);
+ });
});
From 51577688f1c695db72ed75176875a2d3c8c3ea29 Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Mon, 28 Apr 2025 09:07:00 -0700
Subject: [PATCH 31/33] Move nightly publishing tests and release branch tests
to github actions (#8555)
[Example nightly release and publish test](https://github.com/mattsoulanille/tfjs/actions/runs/14672995746/job/41183624273)
[Example release PR test](https://github.com/mattsoulanille/tfjs/actions/runs/14674243426/job/41187438555?pr=1)
---
.bazelrc | 1 +
.../tfjs-nightly-release-and-publish-test.yml | 52 ++++
.../tfjs-release-branch-publish-test.yml | 42 +++
e2e/custom_module/blazeface/yarn.lock | 8 +-
e2e/custom_module/dense_model/yarn.lock | 8 +-
.../universal_sentence_encoder/yarn.lock | 8 +-
e2e/karma.conf.js | 3 +-
.../tfjs-core-cpu/karma.conf.js | 1 +
e2e/script_tag_tests/tfjs/karma.conf.js | 1 +
e2e/scripts/create-python-models.sh | 11 +-
e2e/scripts/run-browserstack-tests.sh | 53 ----
e2e/scripts/run-custom-builds.sh | 14 +-
e2e/scripts/setup-py-env.sh | 11 +-
e2e/scripts/test-ci.sh | 21 +-
e2e/webpack_test/yarn.lock | 8 +-
e2e/yarn.lock | 70 ++---
scripts/publish-npm.ts | 3 +-
tfjs-node/yarn.lock | 268 ++++++++++++++----
tfjs/yarn.lock | 159 ++++++++++-
19 files changed, 534 insertions(+), 208 deletions(-)
create mode 100644 .github/workflows/tfjs-nightly-release-and-publish-test.yml
create mode 100644 .github/workflows/tfjs-release-branch-publish-test.yml
delete mode 100755 e2e/scripts/run-browserstack-tests.sh
diff --git a/.bazelrc b/.bazelrc
index 8da25af637e..49a44c6f8ef 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -46,6 +46,7 @@ build:rbe --config=remote
# Config for Google Cloud continuous integration that uses default credentials.
build:ci --config=bes
+
# This flag is needed to prevent the bazel cache from being invalidated when
# running bazel via `yarn bazel`.
# See https://github.com/angular/angular/issues/27514.
diff --git a/.github/workflows/tfjs-nightly-release-and-publish-test.yml b/.github/workflows/tfjs-nightly-release-and-publish-test.yml
new file mode 100644
index 00000000000..aae8c18e7db
--- /dev/null
+++ b/.github/workflows/tfjs-nightly-release-and-publish-test.yml
@@ -0,0 +1,52 @@
+name: TFJS Nightly Release and Publish Test
+
+on:
+ schedule:
+ - cron: '0 5 * * *' # Runs daily at 5:00 AM UTC
+ workflow_dispatch: # Allows manual triggering
+
+permissions:
+ contents: read # Default permissions, adjust if the script needs to write to the repo
+
+jobs:
+ nightly_release_verification:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@v4
+
+ - name: Setup Bazel
+ uses: bazel-contrib/setup-bazel@0.14.0
+ with:
+ bazelisk-cache: true
+ disk-cache: ${{ github.workflow }}-nightly-release
+ repository-cache: true
+
+ - name: Setup Node.js and Yarn
+ uses: actions/setup-node@v4
+ with:
+ node-version: 20.x # Using a current LTS version of Node.js
+ cache: 'yarn'
+
+ - name: Install Yarn globally (if not already cached by setup-node with yarn cache)
+ run: npm i -g yarn
+
+ - name: Install top-level dependencies
+ run: yarn install --frozen-lockfile
+
+ - name: Run Nightly Verdaccio Test Script
+ env:
+ RELEASE: 'true' # Set RELEASE to true as in the original config
+ run: |
+ set -eEuo pipefail
+ yarn release-tfjs --dry --guess-version release --use-local-changes --force
+ # The original script changes directory to a temporary location created by the release script.
+ # This assumes /tmp/ is accessible and the path structure is consistent.
+ # If release-e2e.sh is relative to the checkout root after the release script prep, adjust path.
+ if [ -d "/tmp/tfjs-release/tfjs/e2e/" ]; then
+ cd /tmp/tfjs-release/tfjs/e2e/
+ bash scripts/release-e2e.sh
+ else
+ echo "Error: Expected directory /tmp/tfjs-release/tfjs/e2e/ not found after release script."
+ exit 1
+ fi
diff --git a/.github/workflows/tfjs-release-branch-publish-test.yml b/.github/workflows/tfjs-release-branch-publish-test.yml
new file mode 100644
index 00000000000..df51fa4acca
--- /dev/null
+++ b/.github/workflows/tfjs-release-branch-publish-test.yml
@@ -0,0 +1,42 @@
+name: TFJS Release Branch Publish Test
+
+on:
+ pull_request:
+ branches:
+ - 'tfjs_**' # Matches branches starting with tfjs_, e.g., tfjs_1.2.3, tfjs_core
+ workflow_dispatch: # Allows manual triggering
+
+permissions:
+ contents: read # Default permissions, adjust if the script needs to write to the repo
+
+jobs:
+ release_e2e_test:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@v4
+
+ - name: Setup Bazel
+ uses: bazel-contrib/setup-bazel@0.14.0
+ with:
+ bazelisk-cache: true
+ disk-cache: ${{ github.workflow }}-release-e2e
+ repository-cache: true
+
+ - name: Setup Node.js and Yarn
+ uses: actions/setup-node@v4
+ with:
+ node-version: 20.x
+          cache: 'yarn' # Yarn is the primary package manager here
+
+ - name: Install Yarn globally (if not already cached by setup-node with yarn cache)
+ run: npm i -g yarn
+
+ - name: Install top-level dependencies
+ run: yarn install --frozen-lockfile
+
+ - name: Run E2E Release Script
+ working-directory: ./e2e # Sets the directory for this step
+ env:
+ RELEASE: 'true' # Set RELEASE to true as requested
+ run: bash ./scripts/release-e2e.sh
diff --git a/e2e/custom_module/blazeface/yarn.lock b/e2e/custom_module/blazeface/yarn.lock
index 1173fe11b1b..f9a1dafc188 100644
--- a/e2e/custom_module/blazeface/yarn.lock
+++ b/e2e/custom_module/blazeface/yarn.lock
@@ -366,10 +366,10 @@
"@webassemblyjs/ast" "1.11.1"
"@xtuc/long" "4.2.2"
-"@webgpu/types@0.1.30":
- version "0.1.30"
- resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.30.tgz#b6406dc4a1c1e0d469028ceb30ddffbbd2fa706c"
- integrity sha512-9AXJSmL3MzY8ZL//JjudA//q+2kBRGhLBFpkdGksWIuxrMy81nFrCzj2Am+mbh8WoU6rXmv7cY5E3rdlyru2Qg==
+"@webgpu/types@0.1.38":
+ version "0.1.38"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
+ integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
"@webpack-cli/info@^1.1.0":
version "1.1.0"
diff --git a/e2e/custom_module/dense_model/yarn.lock b/e2e/custom_module/dense_model/yarn.lock
index ca6d72ed3b9..2602123d929 100644
--- a/e2e/custom_module/dense_model/yarn.lock
+++ b/e2e/custom_module/dense_model/yarn.lock
@@ -305,10 +305,10 @@
"@webassemblyjs/ast" "1.11.1"
"@xtuc/long" "4.2.2"
-"@webgpu/types@0.1.30":
- version "0.1.30"
- resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.30.tgz#b6406dc4a1c1e0d469028ceb30ddffbbd2fa706c"
- integrity sha512-9AXJSmL3MzY8ZL//JjudA//q+2kBRGhLBFpkdGksWIuxrMy81nFrCzj2Am+mbh8WoU6rXmv7cY5E3rdlyru2Qg==
+"@webgpu/types@0.1.38":
+ version "0.1.38"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
+ integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
"@webpack-cli/configtest@^1.1.1":
version "1.1.1"
diff --git a/e2e/custom_module/universal_sentence_encoder/yarn.lock b/e2e/custom_module/universal_sentence_encoder/yarn.lock
index e94b3433ee3..a91e1e05c1d 100644
--- a/e2e/custom_module/universal_sentence_encoder/yarn.lock
+++ b/e2e/custom_module/universal_sentence_encoder/yarn.lock
@@ -305,10 +305,10 @@
"@webassemblyjs/ast" "1.11.1"
"@xtuc/long" "4.2.2"
-"@webgpu/types@0.1.30":
- version "0.1.30"
- resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.30.tgz#b6406dc4a1c1e0d469028ceb30ddffbbd2fa706c"
- integrity sha512-9AXJSmL3MzY8ZL//JjudA//q+2kBRGhLBFpkdGksWIuxrMy81nFrCzj2Am+mbh8WoU6rXmv7cY5E3rdlyru2Qg==
+"@webgpu/types@0.1.38":
+ version "0.1.38"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
+ integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
"@webpack-cli/configtest@^1.1.1":
version "1.1.1"
diff --git a/e2e/karma.conf.js b/e2e/karma.conf.js
index e478c88bf17..8236e0c1e31 100644
--- a/e2e/karma.conf.js
+++ b/e2e/karma.conf.js
@@ -40,6 +40,7 @@ if (coverageEnabled) {
}
const devConfig = {
+ singleRun: true,
frameworks: ['jasmine', 'karma-typescript'],
files: [
{pattern: './node_modules/@babel/polyfill/dist/polyfill.js'},
@@ -148,7 +149,7 @@ module.exports = function(config) {
'spec',
'jasmine-order',
],
- browsers: ['Chrome'],
+ browsers: ['ChromeHeadless'],
browserStack: {
username: process.env.BROWSERSTACK_USERNAME,
accessKey: process.env.BROWSERSTACK_KEY,
diff --git a/e2e/script_tag_tests/tfjs-core-cpu/karma.conf.js b/e2e/script_tag_tests/tfjs-core-cpu/karma.conf.js
index 55bc70140d8..0a77208312e 100644
--- a/e2e/script_tag_tests/tfjs-core-cpu/karma.conf.js
+++ b/e2e/script_tag_tests/tfjs-core-cpu/karma.conf.js
@@ -28,6 +28,7 @@ module.exports = function(config) {
const devConfig = {
frameworks: ['jasmine'],
+ singleRun: true,
files: [
{
pattern: coreBundlePath,
diff --git a/e2e/script_tag_tests/tfjs/karma.conf.js b/e2e/script_tag_tests/tfjs/karma.conf.js
index 46146a61e25..e42e326f703 100644
--- a/e2e/script_tag_tests/tfjs/karma.conf.js
+++ b/e2e/script_tag_tests/tfjs/karma.conf.js
@@ -24,6 +24,7 @@ module.exports = function(config) {
const devConfig = {
frameworks: ['jasmine'],
+ singleRun: true,
files: [
{
pattern: tfjsBundlePath,
diff --git a/e2e/scripts/create-python-models.sh b/e2e/scripts/create-python-models.sh
index 80170bfa4a3..d5b6d0b94c9 100755
--- a/e2e/scripts/create-python-models.sh
+++ b/e2e/scripts/create-python-models.sh
@@ -23,9 +23,14 @@ cd integration_tests
source ../scripts/setup-py-env.sh --dev
-parallel ::: 'echo "Load equivalent keras models and generate outputs." && python create_save_predict.py' \
- 'echo "Create saved models and convert." && python convert_predict.py' \
- 'echo "Convert model with user defined metadata." && python metadata.py'
+echo "Load equivalent keras models and generate outputs."
+python create_save_predict.py
+
+echo "Create saved models and convert."
+python convert_predict.py
+
+echo "Convert model with user defined metadata."
+python metadata.py
# Cleanup python env.
source ../scripts/cleanup-py-env.sh
diff --git a/e2e/scripts/run-browserstack-tests.sh b/e2e/scripts/run-browserstack-tests.sh
deleted file mode 100755
index 8822421b5e9..00000000000
--- a/e2e/scripts/run-browserstack-tests.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-# This script runs browserstack tests on all configured browsers. It requires
-# the TAGS variable to be set in the environment.
-
-set -e
-
-# Smoke and regression tests run in PR and nightly builds.
-TAGS="#SMOKE,#REGRESSION"
-TAGS_WITH_GOLDEN="$TAGS,#GOLDEN"
-
-# Test macOS with smoke/regression tests.
-# Skip golden tests because they time out on browserstack (they work locally).
-# TODO(mattSoulanille): Make golden tests work on BrowserStack Mac.
-COMMANDS+=("yarn run-browserstack --browsers=bs_chrome_mac --tags '$TAGS'")
-
-# Test windows 10 with smoke/regression/golden tests.
-COMMANDS+=("yarn run-browserstack --browsers=win_10_chrome --tags '$TAGS_WITH_GOLDEN'")
-
-# Test script tag bundles
-COMMANDS+=("karma start ./script_tag_tests/tfjs/karma.conf.js --browserstack --browsers=bs_chrome_mac --testBundle tf.min.js")
-
-# Additional tests to run in nightly only.
-if [[ "$NIGHTLY" = true || "$RELEASE" = true ]]; then
- COMMANDS+=(
- "yarn run-browserstack --browsers=bs_ios_12 --tags '$TAGS' --testEnv webgl --flags '{\"\\"\"WEBGL_VERSION\"\\"\": 1, \"\\"\"WEBGL_CPU_FORWARD\"\\"\": false, \"\\"\"WEBGL_SIZE_UPLOAD_UNIFORM\"\\"\": 0}'"
- "yarn run-browserstack --browsers=bs_safari_mac --tags '$TAGS' --testEnv webgl --flags '{\"\\"\"WEBGL_VERSION\"\\"\": 1, \"\\"\"WEBGL_CPU_FORWARD\"\\"\": false, \"\\"\"WEBGL_SIZE_UPLOAD_UNIFORM\"\\"\": 0}'"
- "yarn run-browserstack --browsers=bs_firefox_mac --tags '$TAGS'"
- "yarn run-browserstack --browsers=bs_android_10 --tags '$TAGS'"
- # Test script tag bundles
- "karma start ./script_tag_tests/tfjs-core-cpu/karma.conf.js --browserstack --browsers=bs_chrome_mac"
- )
-fi
-
-for command in "${COMMANDS[@]}"; do
- TO_RUN+=("node ../scripts/run_flaky.js \"$command\"")
-done
-
-parallel ::: "${TO_RUN[@]}"
diff --git a/e2e/scripts/run-custom-builds.sh b/e2e/scripts/run-custom-builds.sh
index 033340dec5e..4b699a40651 100755
--- a/e2e/scripts/run-custom-builds.sh
+++ b/e2e/scripts/run-custom-builds.sh
@@ -15,12 +15,20 @@
# limitations under the License.
# ==============================================================================
+set -e
+
# Start in scripts/ even if run from root directory
cd "$(dirname "$0")"
+
# Go to e2e root
cd ..
-parallel ::: 'cd custom_module/blazeface && ./build.sh' \
- 'cd custom_module/dense_model && ./build.sh' \
- 'cd custom_module/universal_sentence_encoder && ./build.sh'
+echo "Building blazeface..."
+(cd custom_module/blazeface && ./build.sh)
+
+echo "Building dense_model..."
+(cd custom_module/dense_model && ./build.sh)
+
+echo "Building universal_sentence_encoder..."
+(cd custom_module/universal_sentence_encoder && ./build.sh)
diff --git a/e2e/scripts/setup-py-env.sh b/e2e/scripts/setup-py-env.sh
index 1897f678bfc..cdb29d4ec11 100755
--- a/e2e/scripts/setup-py-env.sh
+++ b/e2e/scripts/setup-py-env.sh
@@ -37,16 +37,13 @@ if [[ -z "${DEV_VERSION}" ]]; then
fi
VENV_DIR="$(mktemp -d)_venv"
-echo "Creating virtualenv at ${VENV_DIR} ..."
-PLATFORM="$(python -m platform)"
-if [[ $PLATFORM =~ "Windows" ]]
-then
- python -m virtualenv -p python3 "${VENV_DIR}"
- source "${VENV_DIR}/Scripts/activate"
+echo "Creating venv at ${VENV_DIR} ..."
+if ! which virtualenv > /dev/null 2>&1; then
+ python3 -m venv "${VENV_DIR}"
else
virtualenv -p python3 "${VENV_DIR}"
- source "${VENV_DIR}/bin/activate"
fi
+source "${VENV_DIR}/bin/activate"
# Install python packages.
if [[ "${DEV_VERSION}" == "stable" ]]; then
diff --git a/e2e/scripts/test-ci.sh b/e2e/scripts/test-ci.sh
index f622ef357f2..c9f7302f4d4 100755
--- a/e2e/scripts/test-ci.sh
+++ b/e2e/scripts/test-ci.sh
@@ -14,12 +14,21 @@
# limitations under the License.
# ==============================================================================
-set -e
+set -euxo pipefail
# Generate custom bundle files and model files for tests
-parallel ::: ./scripts/run-custom-builds.sh \
- ./scripts/create-python-models.sh
+./scripts/run-custom-builds.sh
+./scripts/create-python-models.sh
-# Run browserstack tests (and webpack test)
-parallel ::: ./scripts/run-browserstack-tests.sh \
- "cd webpack_test && yarn --mutex network && yarn build"
+TAGS='#SMOKE,#REGRESSION,#GOLDEN'
+
+# Test with smoke/regression tests.
+yarn karma start --single-run --tags "${TAGS}"
+
+# Test script tag bundles
+# Temporarily disabled
+# yarn karma start --single-run ./script_tag_tests/tfjs/karma.conf.js --testBundle tf.min.js --tags "${TAGS}"
+# yarn karma start --single-run ./script_tag_tests/tfjs-core-cpu/karma.conf.js --tags "${TAGS}"
+
+# Test webpack
+(cd webpack_test && yarn --mutex network && yarn build)
diff --git a/e2e/webpack_test/yarn.lock b/e2e/webpack_test/yarn.lock
index b3b39b2d928..9d61ed02fb0 100644
--- a/e2e/webpack_test/yarn.lock
+++ b/e2e/webpack_test/yarn.lock
@@ -222,10 +222,10 @@
"@webassemblyjs/ast" "1.11.1"
"@xtuc/long" "4.2.2"
-"@webgpu/types@0.1.30":
- version "0.1.30"
- resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.30.tgz#b6406dc4a1c1e0d469028ceb30ddffbbd2fa706c"
- integrity sha512-9AXJSmL3MzY8ZL//JjudA//q+2kBRGhLBFpkdGksWIuxrMy81nFrCzj2Am+mbh8WoU6rXmv7cY5E3rdlyru2Qg==
+"@webgpu/types@0.1.38":
+ version "0.1.38"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
+ integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
"@webpack-cli/configtest@^1.0.4":
version "1.0.4"
diff --git a/e2e/yarn.lock b/e2e/yarn.lock
index d7f3dd60e88..4ab949e7b0b 100644
--- a/e2e/yarn.lock
+++ b/e2e/yarn.lock
@@ -1101,10 +1101,10 @@
resolved "https://registry.yarnpkg.com/@types/seedrandom/-/seedrandom-2.4.30.tgz#d2efe425869b84163c2d56e779dddadb9372cbfa"
integrity sha512-AnxLHewubLVzoF/A4qdxBGHCKifw8cY32iro3DQX9TPcetE95zBeVt3jnsvtvAUf1vwzMfwzp4t/L2yqPlnjkQ==
-"@webgpu/types@0.1.30":
- version "0.1.30"
- resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.30.tgz#b6406dc4a1c1e0d469028ceb30ddffbbd2fa706c"
- integrity sha512-9AXJSmL3MzY8ZL//JjudA//q+2kBRGhLBFpkdGksWIuxrMy81nFrCzj2Am+mbh8WoU6rXmv7cY5E3rdlyru2Qg==
+"@webgpu/types@0.1.38":
+ version "0.1.38"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
+ integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
abbrev@1:
version "1.1.1"
@@ -1565,11 +1565,6 @@ chokidar@^3.5.1:
optionalDependencies:
fsevents "~2.3.2"
-chownr@^1.1.4:
- version "1.1.4"
- resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b"
- integrity sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==
-
chownr@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece"
@@ -2236,13 +2231,6 @@ fs-extra@^10.0.1:
jsonfile "^6.0.1"
universalify "^2.0.0"
-fs-minipass@^1.2.7:
- version "1.2.7"
- resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.7.tgz#ccff8570841e7fe4265693da88936c55aed7f7c7"
- integrity sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==
- dependencies:
- minipass "^2.6.0"
-
fs-minipass@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb"
@@ -3100,14 +3088,6 @@ minimist@^1.2.6:
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44"
integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==
-minipass@^2.6.0, minipass@^2.9.0:
- version "2.9.0"
- resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.9.0.tgz#e713762e7d3e32fed803115cf93e04bca9fcc9a6"
- integrity sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==
- dependencies:
- safe-buffer "^5.1.2"
- yallist "^3.0.0"
-
minipass@^3.0.0:
version "3.1.6"
resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.1.6.tgz#3b8150aa688a711a1521af5e8779c1d3bb4f45ee"
@@ -3115,12 +3095,10 @@ minipass@^3.0.0:
dependencies:
yallist "^4.0.0"
-minizlib@^1.3.3:
- version "1.3.3"
- resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d"
- integrity sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==
- dependencies:
- minipass "^2.9.0"
+minipass@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/minipass/-/minipass-5.0.0.tgz#3e9788ffb90b694a5d0ec94479a45b5d8738133d"
+ integrity sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==
minizlib@^2.1.1:
version "2.1.2"
@@ -3608,7 +3586,7 @@ ripemd160@^2.0.0, ripemd160@^2.0.1:
hash-base "^3.0.0"
inherits "^2.0.1"
-safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0:
+safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@~5.2.0:
version "5.2.1"
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6"
integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
@@ -3881,19 +3859,6 @@ supports-preserve-symlinks-flag@^1.0.0:
resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==
-tar@^4.4.6:
- version "4.4.19"
- resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.19.tgz#2e4d7263df26f2b914dee10c825ab132123742f3"
- integrity sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA==
- dependencies:
- chownr "^1.1.4"
- fs-minipass "^1.2.7"
- minipass "^2.9.0"
- minizlib "^1.3.3"
- mkdirp "^0.5.5"
- safe-buffer "^5.2.1"
- yallist "^3.1.1"
-
tar@^6.1.11:
version "6.1.11"
resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.11.tgz#6760a38f003afa1b2ffd0ffe9e9abbd0eab3d621"
@@ -3906,6 +3871,18 @@ tar@^6.1.11:
mkdirp "^1.0.3"
yallist "^4.0.0"
+tar@^6.2.1:
+ version "6.2.1"
+ resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a"
+ integrity sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==
+ dependencies:
+ chownr "^2.0.0"
+ fs-minipass "^2.0.0"
+ minipass "^5.0.0"
+ minizlib "^2.1.1"
+ mkdirp "^1.0.3"
+ yallist "^4.0.0"
+
temp-fs@^0.9.9:
version "0.9.9"
resolved "https://registry.yarnpkg.com/temp-fs/-/temp-fs-0.9.9.tgz#8071730437870720e9431532fe2814364f8803d7"
@@ -4292,11 +4269,6 @@ y18n@^5.0.5:
resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55"
integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==
-yallist@^3.0.0, yallist@^3.1.1:
- version "3.1.1"
- resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd"
- integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==
-
yallist@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72"
diff --git a/scripts/publish-npm.ts b/scripts/publish-npm.ts
index 4aaa696c5a8..2db56e2a11a 100755
--- a/scripts/publish-npm.ts
+++ b/scripts/publish-npm.ts
@@ -350,7 +350,8 @@ async function main() {
const bazelArgs = ['bazel', 'build']
if (args.ci) {
- bazelArgs.push('--config=ci');
+ // Disabled for now since github actions don't have a gcp key currently.
+ // bazelArgs.push('--config=ci');
}
// Use child_process.spawnSync to show bazel build progress.
const result = child_process.spawnSync('yarn',
diff --git a/tfjs-node/yarn.lock b/tfjs-node/yarn.lock
index 27383fd7b84..41fca7003e9 100644
--- a/tfjs-node/yarn.lock
+++ b/tfjs-node/yarn.lock
@@ -227,6 +227,9 @@
semver "^7.3.5"
tar "^6.1.11"
+"@tensorflow/tfjs-backend-cpu@link:../link-package/node_modules/@tensorflow/link-package/node_modules/@tensorflow/tfjs-backend-cpu":
+ version "0.0.0"
+
"@tensorflow/tfjs-backend-cpu@link:../link-package/node_modules/@tensorflow/tfjs-backend-cpu":
version "0.0.0"
uid ""
@@ -268,11 +271,24 @@
resolved "https://registry.yarnpkg.com/@types/jasmine/-/jasmine-4.0.3.tgz#097ce710d70eb7f3662e96c1f75824dd22c27d5c"
integrity sha512-Opp1LvvEuZdk8fSSvchK2mZwhVrsNT0JgJE9Di6MjnaIpmEXM8TLCPPrVtNTYh8+5MPdY8j9bAHMu2SSfwpZJg==
+"@types/long@^4.0.1":
+ version "4.0.2"
+ resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.2.tgz#b74129719fc8d11c01868010082d483b7545591a"
+ integrity sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==
+
"@types/minimatch@*":
version "3.0.4"
resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.4.tgz#f0ec25dbf2f0e4b18647313ac031134ca5b24b21"
integrity sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA==
+"@types/node-fetch@^2.1.2":
+ version "2.6.12"
+ resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.12.tgz#8ab5c3ef8330f13100a7479e2cd56d3386830a03"
+ integrity sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==
+ dependencies:
+ "@types/node" "*"
+ form-data "^4.0.0"
+
"@types/node@*":
version "14.14.36"
resolved "https://registry.yarnpkg.com/@types/node/-/node-14.14.36.tgz#5637905dbb15c30a33a3c65b9ef7c20e3c85ebad"
@@ -283,6 +299,16 @@
resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.55.tgz#a147f282edec679b894d4694edb5abeb595fecbd"
integrity sha512-koZJ89uLZufDvToeWO5BrC4CR4OUfHnUz2qoPs/daQH6qq3IN62QFxCTZ+bKaCE0xaoCAJYE4AXre8AbghCrhg==
+"@types/offscreencanvas@~2019.3.0":
+ version "2019.3.0"
+ resolved "https://registry.yarnpkg.com/@types/offscreencanvas/-/offscreencanvas-2019.3.0.tgz#3336428ec7e9180cf4566dfea5da04eb586a6553"
+ integrity sha512-esIJx9bQg+QYF0ra8GnvfianIY8qWB0GBx54PK5Eps6m+xTj86KLavHv6qDhzKcu5UUOgNfJ2pWaIIV7TRUd9Q==
+
+"@types/offscreencanvas@~2019.7.0":
+ version "2019.7.3"
+ resolved "https://registry.yarnpkg.com/@types/offscreencanvas/-/offscreencanvas-2019.7.3.tgz#90267db13f64d6e9ccb5ae3eac92786a7c77a516"
+ integrity sha512-ieXiYmgSRXUDeOntE1InxjWyvEelZGP63M+cGuquuRLuIKKT1osnkXjxev9B7d1nXSug5vpunx+gNlbVxMlC9A==
+
"@types/progress@^2.0.1":
version "2.0.3"
resolved "https://registry.yarnpkg.com/@types/progress/-/progress-2.0.3.tgz#7ccbd9c6d4d601319126c469e73b5bb90dfc8ccc"
@@ -298,6 +324,11 @@
"@types/glob" "*"
"@types/node" "*"
+"@types/seedrandom@^2.4.28":
+ version "2.4.34"
+ resolved "https://registry.yarnpkg.com/@types/seedrandom/-/seedrandom-2.4.34.tgz#c725cd0fc0442e2d3d0e5913af005686ffb7eb99"
+ integrity sha512-ytDiArvrn/3Xk6/vtylys5tlY6eo7Ane0hvcx++TKo6RxQXuVfW0AF/oeWqAj9dN29SyhtawuXstgmPlwNcv/A==
+
"@types/yargs-parser@*":
version "20.2.0"
resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-20.2.0.tgz#dd3e6699ba3237f0348cd085e4698780204842f9"
@@ -310,6 +341,11 @@
dependencies:
"@types/yargs-parser" "*"
+"@webgpu/types@0.1.38":
+ version "0.1.38"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
+ integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
+
abbrev@1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8"
@@ -403,6 +439,11 @@ async@^3.2.3:
resolved "https://registry.yarnpkg.com/async/-/async-3.2.3.tgz#ac53dafd3f4720ee9e8a160628f18ea91df196c9"
integrity sha512-spZRyzKL5l5BZQrr/6m/SqFdBN0q3OCI0f9rjfBzCMBIP4p75P620rR3gTmaksNOhmzgdxcaxdNfMy6anrbM0g==
+asynckit@^0.4.0:
+ version "0.4.0"
+ resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+ integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==
+
balanced-match@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
@@ -447,6 +488,14 @@ caching-transform@^4.0.0:
package-hash "^4.0.0"
write-file-atomic "^3.0.0"
+call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6"
+ integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==
+ dependencies:
+ es-errors "^1.3.0"
+ function-bind "^1.1.2"
+
camelcase@^5.0.0, camelcase@^5.3.1:
version "5.3.1"
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320"
@@ -474,11 +523,6 @@ chalk@^4.1.0:
ansi-styles "^4.1.0"
supports-color "^7.1.0"
-chownr@^1.1.4:
- version "1.1.4"
- resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b"
- integrity sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==
-
chownr@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece"
@@ -545,6 +589,13 @@ color-support@^1.1.2:
resolved "https://registry.yarnpkg.com/color-support/-/color-support-1.1.3.tgz#93834379a1cc9a0c61f82f52f0d04322251bd5a2"
integrity sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==
+combined-stream@^1.0.8:
+ version "1.0.8"
+ resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f"
+ integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==
+ dependencies:
+ delayed-stream "~1.0.0"
+
commander@^2.12.1:
version "2.20.3"
resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
@@ -612,6 +663,11 @@ default-require-extensions@^3.0.0:
dependencies:
strip-bom "^4.0.0"
+delayed-stream@~1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+ integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
+
delegates@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
@@ -632,6 +688,15 @@ diff@^4.0.1:
resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d"
integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==
+dunder-proto@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a"
+ integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==
+ dependencies:
+ call-bind-apply-helpers "^1.0.1"
+ es-errors "^1.3.0"
+ gopd "^1.2.0"
+
electron-to-chromium@^1.4.76:
version "1.4.82"
resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.82.tgz#51e123ca434b1eba8c434ece2b54f095b304a651"
@@ -642,6 +707,33 @@ emoji-regex@^8.0.0:
resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37"
integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
+es-define-property@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa"
+ integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==
+
+es-errors@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f"
+ integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==
+
+es-object-atoms@^1.0.0, es-object-atoms@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1"
+ integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==
+ dependencies:
+ es-errors "^1.3.0"
+
+es-set-tostringtag@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d"
+ integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==
+ dependencies:
+ es-errors "^1.3.0"
+ get-intrinsic "^1.2.6"
+ has-tostringtag "^1.0.2"
+ hasown "^2.0.2"
+
es6-error@^4.0.1:
version "4.1.1"
resolved "https://registry.yarnpkg.com/es6-error/-/es6-error-4.1.1.tgz#9e3af407459deed47e9a91f9b885a84eb05c561d"
@@ -699,6 +791,16 @@ foreground-child@^2.0.0:
cross-spawn "^7.0.0"
signal-exit "^3.0.2"
+form-data@^4.0.0:
+ version "4.0.2"
+ resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.2.tgz#35cabbdd30c3ce73deb2c42d3c8d3ed9ca51794c"
+ integrity sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==
+ dependencies:
+ asynckit "^0.4.0"
+ combined-stream "^1.0.8"
+ es-set-tostringtag "^2.1.0"
+ mime-types "^2.1.12"
+
fromentries@^1.2.0:
version "1.3.2"
resolved "https://registry.yarnpkg.com/fromentries/-/fromentries-1.3.2.tgz#e4bca6808816bf8f93b52750f1127f5a6fd86e3a"
@@ -713,13 +815,6 @@ fs-extra@^8.0.1:
jsonfile "^4.0.0"
universalify "^0.1.0"
-fs-minipass@^1.2.7:
- version "1.2.7"
- resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.7.tgz#ccff8570841e7fe4265693da88936c55aed7f7c7"
- integrity sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==
- dependencies:
- minipass "^2.6.0"
-
fs-minipass@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb"
@@ -737,6 +832,11 @@ function-bind@^1.1.1:
resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==
+function-bind@^1.1.2:
+ version "1.1.2"
+ resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c"
+ integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==
+
gauge@^3.0.0:
version "3.0.2"
resolved "https://registry.yarnpkg.com/gauge/-/gauge-3.0.2.tgz#03bf4441c044383908bcfa0656ad91803259b395"
@@ -762,11 +862,35 @@ get-caller-file@^2.0.1, get-caller-file@^2.0.5:
resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e"
integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==
+get-intrinsic@^1.2.6:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01"
+ integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==
+ dependencies:
+ call-bind-apply-helpers "^1.0.2"
+ es-define-property "^1.0.1"
+ es-errors "^1.3.0"
+ es-object-atoms "^1.1.1"
+ function-bind "^1.1.2"
+ get-proto "^1.0.1"
+ gopd "^1.2.0"
+ has-symbols "^1.1.0"
+ hasown "^2.0.2"
+ math-intrinsics "^1.1.0"
+
get-package-type@^0.1.0:
version "0.1.0"
resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a"
integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==
+get-proto@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1"
+ integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==
+ dependencies:
+ dunder-proto "^1.0.1"
+ es-object-atoms "^1.0.0"
+
glob@^7.0.0, glob@^7.1.1, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6:
version "7.1.6"
resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6"
@@ -789,6 +913,11 @@ google-protobuf@^3.9.2:
resolved "https://registry.yarnpkg.com/google-protobuf/-/google-protobuf-3.15.6.tgz#2048055828530993a51df4d4ca2c08322fc1ec7c"
integrity sha512-p65NyhIZFHFUxbIPOm6cygg2rCjK+2uDCxruOG3RaWKM9R4rBGX0STmlJoSOhoyAG8Fha7U8FP4pQomAV1JXsA==
+gopd@^1.2.0:
+ version "1.2.0"
+ resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1"
+ integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==
+
graceful-fs@^4.1.15, graceful-fs@^4.1.6, graceful-fs@^4.2.0:
version "4.2.6"
resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.6.tgz#ff040b2b0853b23c3d31027523706f1885d76bee"
@@ -804,6 +933,18 @@ has-flag@^4.0.0:
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==
+has-symbols@^1.0.3, has-symbols@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338"
+ integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==
+
+has-tostringtag@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc"
+ integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==
+ dependencies:
+ has-symbols "^1.0.3"
+
has-unicode@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
@@ -824,6 +965,13 @@ hasha@^5.0.0:
is-stream "^2.0.0"
type-fest "^0.8.0"
+hasown@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003"
+ integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==
+ dependencies:
+ function-bind "^1.1.2"
+
html-escaper@^2.0.0:
version "2.0.2"
resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453"
@@ -1038,6 +1186,11 @@ lodash@^4.17.19:
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
+long@4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28"
+ integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==
+
lru-cache@^6.0.0:
version "6.0.0"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94"
@@ -1057,6 +1210,23 @@ make-error@^1.1.1:
resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2"
integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==
+math-intrinsics@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9"
+ integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==
+
+mime-db@1.52.0:
+ version "1.52.0"
+ resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70"
+ integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
+
+mime-types@^2.1.12:
+ version "2.1.35"
+ resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a"
+ integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
+ dependencies:
+ mime-db "1.52.0"
+
minimatch@^3.0.4:
version "3.1.2"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
@@ -1064,19 +1234,11 @@ minimatch@^3.0.4:
dependencies:
brace-expansion "^1.1.7"
-minimist@^1.2.5, minimist@^1.2.6:
+minimist@^1.2.5:
version "1.2.6"
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44"
integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==
-minipass@^2.6.0, minipass@^2.9.0:
- version "2.9.0"
- resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.9.0.tgz#e713762e7d3e32fed803115cf93e04bca9fcc9a6"
- integrity sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==
- dependencies:
- safe-buffer "^5.1.2"
- yallist "^3.0.0"
-
minipass@^3.0.0:
version "3.1.3"
resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.1.3.tgz#7d42ff1f39635482e15f9cdb53184deebd5815fd"
@@ -1084,12 +1246,10 @@ minipass@^3.0.0:
dependencies:
yallist "^4.0.0"
-minizlib@^1.3.3:
- version "1.3.3"
- resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d"
- integrity sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==
- dependencies:
- minipass "^2.9.0"
+minipass@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/minipass/-/minipass-5.0.0.tgz#3e9788ffb90b694a5d0ec94479a45b5d8738133d"
+ integrity sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==
minizlib@^2.1.1:
version "2.1.2"
@@ -1106,13 +1266,6 @@ mkdirp@^0.5.3:
dependencies:
minimist "^1.2.5"
-mkdirp@^0.5.5:
- version "0.5.6"
- resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6"
- integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==
- dependencies:
- minimist "^1.2.6"
-
mkdirp@^1.0.3:
version "1.0.4"
resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-1.0.4.tgz#3eb5ed62622756d79a5f0e2a221dfebad75c2f7e"
@@ -1373,16 +1526,21 @@ rimraf@^3.0.0, rimraf@^3.0.2:
dependencies:
glob "^7.1.3"
-safe-buffer@^5.1.2, safe-buffer@^5.2.1, safe-buffer@~5.2.0:
- version "5.2.1"
- resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6"
- integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
-
safe-buffer@~5.1.1:
version "5.1.2"
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
+safe-buffer@~5.2.0:
+ version "5.2.1"
+ resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6"
+ integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
+
+seedrandom@^3.0.5:
+ version "3.0.5"
+ resolved "https://registry.yarnpkg.com/seedrandom/-/seedrandom-3.0.5.tgz#54edc85c95222525b0c7a6f6b3543d8e0b3aa0a7"
+ integrity sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg==
+
semver@^5.3.0:
version "5.7.1"
resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7"
@@ -1475,7 +1633,7 @@ string-width@^4.1.0, string-width@^4.2.0:
is-fullwidth-code-point "^3.0.0"
strip-ansi "^6.0.0"
-string_decoder@^1.1.1:
+string_decoder@^1.1.1, string_decoder@^1.3.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e"
integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
@@ -1515,19 +1673,6 @@ supports-color@^7.1.0:
dependencies:
has-flag "^4.0.0"
-tar@^4.4.6:
- version "4.4.19"
- resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.19.tgz#2e4d7263df26f2b914dee10c825ab132123742f3"
- integrity sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA==
- dependencies:
- chownr "^1.1.4"
- fs-minipass "^1.2.7"
- minipass "^2.9.0"
- minizlib "^1.3.3"
- mkdirp "^0.5.5"
- safe-buffer "^5.2.1"
- yallist "^3.1.1"
-
tar@^6.1.11:
version "6.1.11"
resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.11.tgz#6760a38f003afa1b2ffd0ffe9e9abbd0eab3d621"
@@ -1540,6 +1685,18 @@ tar@^6.1.11:
mkdirp "^1.0.3"
yallist "^4.0.0"
+tar@^6.2.1:
+ version "6.2.1"
+ resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a"
+ integrity sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==
+ dependencies:
+ chownr "^2.0.0"
+ fs-minipass "^2.0.0"
+ minipass "^5.0.0"
+ minizlib "^2.1.1"
+ mkdirp "^1.0.3"
+ yallist "^4.0.0"
+
test-exclude@^6.0.0:
version "6.0.0"
resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e"
@@ -1734,11 +1891,6 @@ yalc@~1.0.0-pre.50:
npm-packlist "^1.4.1"
yargs "^16.1.1"
-yallist@^3.0.0, yallist@^3.1.1:
- version "3.1.1"
- resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd"
- integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==
-
yallist@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72"
diff --git a/tfjs/yarn.lock b/tfjs/yarn.lock
index 5dbc13b3f00..d64914b66c1 100644
--- a/tfjs/yarn.lock
+++ b/tfjs/yarn.lock
@@ -1976,6 +1976,14 @@
resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.4.tgz#f0ec25dbf2f0e4b18647313ac031134ca5b24b21"
integrity sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA==
+"@types/node-fetch@^2.1.2":
+ version "2.6.12"
+ resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.12.tgz#8ab5c3ef8330f13100a7479e2cd56d3386830a03"
+ integrity sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==
+ dependencies:
+ "@types/node" "*"
+ form-data "^4.0.0"
+
"@types/node@*", "@types/node@>=10.0.0":
version "18.11.9"
resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.9.tgz#02d013de7058cea16d36168ef2fc653464cfbad4"
@@ -2026,10 +2034,10 @@
dependencies:
"@types/yargs-parser" "*"
-"@webgpu/types@0.1.30":
- version "0.1.30"
- resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.30.tgz#b6406dc4a1c1e0d469028ceb30ddffbbd2fa706c"
- integrity sha512-9AXJSmL3MzY8ZL//JjudA//q+2kBRGhLBFpkdGksWIuxrMy81nFrCzj2Am+mbh8WoU6rXmv7cY5E3rdlyru2Qg==
+"@webgpu/types@0.1.38":
+ version "0.1.38"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
+ integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
accepts@~1.3.4:
version "1.3.8"
@@ -2145,6 +2153,11 @@ async@^3.0.1:
resolved "https://registry.yarnpkg.com/async/-/async-3.2.0.tgz#b3a2685c5ebb641d3de02d161002c60fc9f85720"
integrity sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==
+asynckit@^0.4.0:
+ version "0.4.0"
+ resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+ integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==
+
available-typed-arrays@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.2.tgz#6b098ca9d8039079ee3f77f7b783c4480ba513f5"
@@ -2433,6 +2446,14 @@ bytes@3.1.0:
resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6"
integrity sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==
+call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6"
+ integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==
+ dependencies:
+ es-errors "^1.3.0"
+ function-bind "^1.1.2"
+
call-bind@^1.0.0, call-bind@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.2.tgz#b1d4e89e688119c3c9a903ad30abb2f6a919be3c"
@@ -2573,6 +2594,13 @@ combine-source-map@^0.8.0:
lodash.memoize "~3.0.3"
source-map "~0.5.3"
+combined-stream@^1.0.8:
+ version "1.0.8"
+ resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f"
+ integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==
+ dependencies:
+ delayed-stream "~1.0.0"
+
commander@^2.12.1, commander@^2.20.0:
version "2.20.3"
resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
@@ -2782,6 +2810,11 @@ define-properties@^1.1.3:
dependencies:
object-keys "^1.0.12"
+delayed-stream@~1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+ integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
+
depd@~1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9"
@@ -2839,6 +2872,15 @@ domain-browser@^4.16.0:
resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-4.19.0.tgz#1093e17c0a17dbd521182fe90d49ac1370054af1"
integrity sha512-fRA+BaAWOR/yr/t7T9E9GJztHPeFjj8U35ajyAjCDtAAnTn1Rc1f6W6VGPJrO1tkQv9zWu+JRof7z6oQtiYVFQ==
+dunder-proto@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a"
+ integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==
+ dependencies:
+ call-bind-apply-helpers "^1.0.1"
+ es-errors "^1.3.0"
+ gopd "^1.2.0"
+
duplexer@~0.1.1:
version "0.1.2"
resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.2.tgz#3abe43aef3835f8ae077d136ddce0f276b0400e6"
@@ -2937,6 +2979,33 @@ es-abstract@^1.18.0-next.1, es-abstract@^1.18.0-next.2:
string.prototype.trimstart "^1.0.4"
unbox-primitive "^1.0.0"
+es-define-property@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa"
+ integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==
+
+es-errors@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f"
+ integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==
+
+es-object-atoms@^1.0.0, es-object-atoms@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1"
+ integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==
+ dependencies:
+ es-errors "^1.3.0"
+
+es-set-tostringtag@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d"
+ integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==
+ dependencies:
+ es-errors "^1.3.0"
+ get-intrinsic "^1.2.6"
+ has-tostringtag "^1.0.2"
+ hasown "^2.0.2"
+
es-to-primitive@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a"
@@ -3064,6 +3133,16 @@ foreach@^2.0.5:
resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99"
integrity sha1-C+4AUBiusmDQo6865ljdATbsG5k=
+form-data@^4.0.0:
+ version "4.0.2"
+ resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.2.tgz#35cabbdd30c3ce73deb2c42d3c8d3ed9ca51794c"
+ integrity sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==
+ dependencies:
+ asynckit "^0.4.0"
+ combined-stream "^1.0.8"
+ es-set-tostringtag "^2.1.0"
+ mime-types "^2.1.12"
+
from@~0:
version "0.1.7"
resolved "https://registry.yarnpkg.com/from/-/from-0.1.7.tgz#83c60afc58b9c56997007ed1a768b3ab303a44fe"
@@ -3109,6 +3188,11 @@ function-bind@^1.1.1:
resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==
+function-bind@^1.1.2:
+ version "1.1.2"
+ resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c"
+ integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==
+
gensync@^1.0.0-beta.2:
version "1.0.0-beta.2"
resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0"
@@ -3128,6 +3212,30 @@ get-intrinsic@^1.0.2, get-intrinsic@^1.1.1:
has "^1.0.3"
has-symbols "^1.0.1"
+get-intrinsic@^1.2.6:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01"
+ integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==
+ dependencies:
+ call-bind-apply-helpers "^1.0.2"
+ es-define-property "^1.0.1"
+ es-errors "^1.3.0"
+ es-object-atoms "^1.1.1"
+ function-bind "^1.1.2"
+ get-proto "^1.0.1"
+ gopd "^1.2.0"
+ has-symbols "^1.1.0"
+ hasown "^2.0.2"
+ math-intrinsics "^1.1.0"
+
+get-proto@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1"
+ integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==
+ dependencies:
+ dunder-proto "^1.0.1"
+ es-object-atoms "^1.0.0"
+
glob-parent@~5.1.2:
version "5.1.2"
resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4"
@@ -3175,6 +3283,11 @@ globals@^11.1.0:
resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e"
integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==
+gopd@^1.2.0:
+ version "1.2.0"
+ resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1"
+ integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==
+
graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.6:
version "4.2.9"
resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.9.tgz#041b05df45755e587a24942279b9d113146e1c96"
@@ -3207,6 +3320,18 @@ has-symbols@^1.0.1, has-symbols@^1.0.2:
resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.2.tgz#165d3070c00309752a1236a479331e3ac56f1423"
integrity sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==
+has-symbols@^1.0.3, has-symbols@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338"
+ integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==
+
+has-tostringtag@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc"
+ integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==
+ dependencies:
+ has-symbols "^1.0.3"
+
has@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796"
@@ -3231,6 +3356,13 @@ hash.js@^1.0.0, hash.js@^1.0.3:
inherits "^2.0.3"
minimalistic-assert "^1.0.1"
+hasown@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003"
+ integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==
+ dependencies:
+ function-bind "^1.1.2"
+
hmac-drbg@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1"
@@ -3851,6 +3983,11 @@ map-stream@~0.1.0:
resolved "https://registry.yarnpkg.com/map-stream/-/map-stream-0.1.0.tgz#e56aa94c4c8055a16404a0674b78f215f7c8e194"
integrity sha1-5WqpTEyAVaFkBKBnS3jyFffI4ZQ=
+math-intrinsics@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9"
+ integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==
+
md5.js@^1.3.4:
version "1.3.5"
resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f"
@@ -3893,6 +4030,13 @@ mime-db@1.52.0:
resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70"
integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
+mime-types@^2.1.12, mime-types@~2.1.34:
+ version "2.1.35"
+ resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a"
+ integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
+ dependencies:
+ mime-db "1.52.0"
+
mime-types@~2.1.24:
version "2.1.34"
resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.34.tgz#5a712f9ec1503511a945803640fafe09d3793c24"
@@ -3900,13 +4044,6 @@ mime-types@~2.1.24:
dependencies:
mime-db "1.51.0"
-mime-types@~2.1.34:
- version "2.1.35"
- resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a"
- integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
- dependencies:
- mime-db "1.52.0"
-
mime@^2.5.2:
version "2.6.0"
resolved "https://registry.yarnpkg.com/mime/-/mime-2.6.0.tgz#a2a682a95cd4d0cb1d6257e28f83da7e35800367"
From 0ef019b66dafe955dceaf00cf622358bd30c91ec Mon Sep 17 00:00:00 2001
From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com>
Date: Fri, 30 May 2025 04:09:13 +0530
Subject: [PATCH 32/33] Add NAPI-v9 support to tfjs-node-gpu package (#8547)
* Add NAPI-v9 support to tfjs-node-gpu package
* Update NAPI version to 9 for tfjs-node
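
Context for reviewers: `binary.napi_versions` tells node-pre-gyp which N-API
levels prebuilt binaries are published for, and at install time the best
match for the running Node is chosen. Below is a minimal TypeScript sketch
of that selection logic, not part of this patch: the list literal mirrors
the updated package.json, `bestNapiVersion` is a hypothetical name, and the
only real API used is `process.versions.napi`.

    // Sketch only: choose the highest published N-API level that the
    // current Node runtime can load. node-pre-gyp performs a similar
    // selection from the "binary.napi_versions" field at install time.
    const SUPPORTED_NAPI_VERSIONS = [5, 6, 7, 8, 9]; // mirrors package.json

    function bestNapiVersion(): number | undefined {
      // process.versions.napi reports the runtime's N-API level.
      const runtime = Number(process.versions.napi ?? 0);
      const usable = SUPPORTED_NAPI_VERSIONS.filter((v) => v <= runtime);
      return usable.length > 0 ? Math.max(...usable) : undefined;
    }

Recent Node releases report N-API level 9 (18.17+ per the Node-API version
matrix), so adding the new entry lets those runtimes pick up a binary built
against the latest level instead of falling back to level 8.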
---
tfjs-node-gpu/package.json | 3 ++-
tfjs-node/package.json | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/tfjs-node-gpu/package.json b/tfjs-node-gpu/package.json
index e1773f308c4..ba5dd8275b1 100644
--- a/tfjs-node-gpu/package.json
+++ b/tfjs-node-gpu/package.json
@@ -83,7 +83,8 @@
5,
6,
7,
- 8
+ 8,
+ 9
]
}
}
diff --git a/tfjs-node/package.json b/tfjs-node/package.json
index 46d0c99e09a..de8c0a2fa39 100644
--- a/tfjs-node/package.json
+++ b/tfjs-node/package.json
@@ -81,7 +81,8 @@
5,
6,
7,
- 8
+ 8,
+ 9
]
}
}
From 0fc04d958ea592f3b8db79a8b3b497b5c8904097 Mon Sep 17 00:00:00 2001
From: Matthew Soulanille
Date: Fri, 6 Jun 2025 13:55:10 -0700
Subject: [PATCH 33/33] Remove TensorFlow Hub from converter tests (#8563)
---
.../python/test_nightly_pip_package.py | 21 --------
tfjs-converter/python/test_pip_package.py | 49 -------------------
tfjs-converter/yarn.lock | 40 +++++++++++++--
3 files changed, 36 insertions(+), 74 deletions(-)
diff --git a/tfjs-converter/python/test_nightly_pip_package.py b/tfjs-converter/python/test_nightly_pip_package.py
index bfe64678e09..1d55d3bc72f 100644
--- a/tfjs-converter/python/test_nightly_pip_package.py
+++ b/tfjs-converter/python/test_nightly_pip_package.py
@@ -47,27 +47,6 @@ def tearDown(self):
shutil.rmtree(self._tmp_dir)
super(APIAndShellTest, self).tearDown()
- def testConvertTfHubMobileNetV2ToTfjsGraphModel(self):
- # 1. Convert tfhub mobilenet v2 module.
- tfhub_url = (
- 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_224'
- '/classification/3'
- )
- graph_model_output_dir = os.path.join(self._tmp_dir, 'tfjs_graph')
- process = subprocess.Popen([
- 'tensorflowjs_converter', '--input_format', 'tf_hub',
- tfhub_url, graph_model_output_dir
- ])
- process.communicate()
- self.assertEqual(0, process.returncode)
-
- # 2. Check the files that belong to the conversion result.
- files = glob.glob(os.path.join(graph_model_output_dir, '*'))
- self.assertIn(os.path.join(graph_model_output_dir, 'model.json'), files)
- weight_files = glob.glob(
- os.path.join(graph_model_output_dir, 'group*.bin'))
- self.assertEqual(len(weight_files), 4)
-
def testConvertMobileNetV2ModelToTfjsGraphModel(self):
"""create the keras mobilenet v2 model."""
# 1. Create a saved model from keras mobilenet v2.
diff --git a/tfjs-converter/python/test_pip_package.py b/tfjs-converter/python/test_pip_package.py
index bf76d0a14c3..42cdd12b1fe 100644
--- a/tfjs-converter/python/test_pip_package.py
+++ b/tfjs-converter/python/test_pip_package.py
@@ -36,7 +36,6 @@
from tensorflow.python.tools import freeze_graph
from tensorflow.python.trackable import autotrackable
from tensorflow.python.saved_model.save import save
-import tensorflow_hub as hub
import tensorflowjs as tfjs
@@ -124,27 +123,6 @@ def _createTensorFlowSavedModel(save_path):
save(root, save_path, to_save)
-
-def _create_hub_module(save_path):
- """Create a TensorFlow Hub module for testing.
-
- Args:
- save_path: The directory path in which to save the model.
- """
- # Module function that doubles its input.
- def double_module_fn():
- w = tf.Variable([2.0, 4.0])
- x = tf.compat.v1.placeholder(dtype=tf.float32)
- hub.add_signature(inputs=x, outputs=x*w)
- graph = tf.Graph()
- with graph.as_default():
- spec = hub.create_module_spec(double_module_fn)
- m = hub.Module(spec)
- # Export the module.
- with tf.compat.v1.Session(graph=graph) as sess:
- sess.run(tf.compat.v1.global_variables_initializer())
- m.export(save_path, sess)
-
def _create_frozen_model(save_path):
graph = tf.Graph()
saved_model_dir = os.path.join(save_path)
@@ -198,7 +176,6 @@ def setUpClass(cls):
_createTensorFlowSavedModelV1('b', cls.tf_saved_model_v1_dir)
_create_frozen_model(cls.tf_frozen_model_dir)
cls.tf_hub_module_dir = os.path.join(cls.class_tmp_dir, 'tf_hub_module')
- _create_hub_module(cls.tf_hub_module_dir)
@classmethod
def tearDownClass(cls):
@@ -456,32 +433,6 @@ def testConvertTFSavedModelV1WithCommandLineWorks(self):
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
-
- def testConvertTFHubModuleWithCommandLineWorks(self):
- output_dir = os.path.join(self._tmp_dir)
- process = subprocess.Popen([
- 'tensorflowjs_converter', '--input_format', 'tf_hub',
- self.tf_hub_module_dir, output_dir
- ])
- process.communicate()
- self.assertEqual(0, process.returncode)
-
- weights = [{
- 'paths': ['group1-shard1of1.bin'],
- 'weights': [{
- 'shape': [2],
- 'name': 'module/Variable',
- 'dtype': 'float32'
- }]
- }]
- # Load the saved weights as a JSON string.
- output_json = json.load(
- open(os.path.join(output_dir, 'model.json'), 'rt'))
- self.assertEqual(output_json['weightsManifest'], weights)
-
- # Check the content of the output directory.
- self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
-
def testConvertTFSavedModelWithCommandLineWorks(self):
output_dir = os.path.join(self._tmp_dir)
process = subprocess.Popen([
diff --git a/tfjs-converter/yarn.lock b/tfjs-converter/yarn.lock
index a0535ff08b1..a604d0acc5a 100644
--- a/tfjs-converter/yarn.lock
+++ b/tfjs-converter/yarn.lock
@@ -67,15 +67,22 @@
"@tensorflow/tfjs-backend-cpu@link:../link-package/node_modules/@tensorflow/tfjs-backend-cpu":
version "0.0.0"
+ uid ""
"@tensorflow/tfjs-core@link:../link-package/node_modules/@tensorflow/tfjs-core":
version "0.0.0"
+ uid ""
"@types/argparse@^1.0.38":
version "1.0.38"
resolved "https://registry.yarnpkg.com/@types/argparse/-/argparse-1.0.38.tgz#a81fd8606d481f873a3800c6ebae4f1d768a56a9"
integrity sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA==
+"@types/long@^4.0.1":
+ version "4.0.2"
+ resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.2.tgz#b74129719fc8d11c01868010082d483b7545591a"
+ integrity sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==
+
"@types/long@~3.0.32":
version "3.0.32"
resolved "https://registry.yarnpkg.com/@types/long/-/long-3.0.32.tgz#f4e5af31e9e9b196d8e5fca8a5e2e20aa3d60b69"
@@ -93,6 +100,21 @@
resolved "https://registry.yarnpkg.com/@types/node/-/node-17.0.38.tgz#f8bb07c371ccb1903f3752872c89f44006132947"
integrity sha512-5jY9RhV7c0Z4Jy09G+NIDTsCZ5G0L5n+Z+p+Y7t5VJHM30bgwzSjVtlcBxqAj+6L/swIlvtOSzr8rBk/aNyV2g==
+"@types/offscreencanvas@~2019.7.0":
+ version "2019.7.3"
+ resolved "https://registry.yarnpkg.com/@types/offscreencanvas/-/offscreencanvas-2019.7.3.tgz#90267db13f64d6e9ccb5ae3eac92786a7c77a516"
+ integrity sha512-ieXiYmgSRXUDeOntE1InxjWyvEelZGP63M+cGuquuRLuIKKT1osnkXjxev9B7d1nXSug5vpunx+gNlbVxMlC9A==
+
+"@types/seedrandom@^2.4.28":
+ version "2.4.34"
+ resolved "https://registry.yarnpkg.com/@types/seedrandom/-/seedrandom-2.4.34.tgz#c725cd0fc0442e2d3d0e5913af005686ffb7eb99"
+ integrity sha512-ytDiArvrn/3Xk6/vtylys5tlY6eo7Ane0hvcx++TKo6RxQXuVfW0AF/oeWqAj9dN29SyhtawuXstgmPlwNcv/A==
+
+"@webgpu/types@0.1.38":
+ version "0.1.38"
+ resolved "https://registry.yarnpkg.com/@webgpu/types/-/types-0.1.38.tgz#6fda4b410edc753d3213c648320ebcf319669020"
+ integrity sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==
+
ansi-regex@^5.0.1:
version "5.0.1"
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304"
@@ -277,6 +299,11 @@ jsonfile@^4.0.0:
optionalDependencies:
graceful-fs "^4.1.6"
+long@4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28"
+ integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==
+
long@^5.0.0:
version "5.2.3"
resolved "https://registry.yarnpkg.com/long/-/long-5.2.3.tgz#a3ba97f3877cf1d778eccbcb048525ebb77499e1"
@@ -370,6 +397,11 @@ require-directory@^2.1.1:
resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
integrity sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==
+seedrandom@^3.0.5:
+ version "3.0.5"
+ resolved "https://registry.yarnpkg.com/seedrandom/-/seedrandom-3.0.5.tgz#54edc85c95222525b0c7a6f6b3543d8e0b3aa0a7"
+ integrity sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg==
+
source-map-support@^0.5.6:
version "0.5.19"
resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61"
@@ -427,10 +459,10 @@ ts-node@~8.8.2:
source-map-support "^0.5.6"
yn "3.1.1"
-typescript@4.9.4:
- version "4.9.4"
- resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.4.tgz#a2a3d2756c079abda241d75f149df9d561091e78"
- integrity sha512-Uz+dTXYzxXXbsFpM86Wh3dKCxrQqUcVMxwU54orwlJjOpO3ao8L7j5lH+dWfTwgCwIuM9GQ2kvVotzYJMXTBZg==
+typescript@5.0.4:
+ version "5.0.4"
+ resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.0.4.tgz#b217fd20119bd61a94d4011274e0ab369058da3b"
+ integrity sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==
universalify@^0.1.0:
version "0.1.2"