/**
* @license
* Copyright 2018 Google LLC
*
* Use of this source code is governed by an MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT.
* =============================================================================
*/
import * as tfc from '@tensorflow/tfjs-core';
import { io, ModelPredictConfig as ModelPredictArgs, NamedTensorMap, Optimizer, Scalar, Tensor } from '@tensorflow/tfjs-core';
import { BaseCallback, History, ModelLoggingVerbosity } from '../base_callbacks';
import { Shape } from '../keras_format/common';
import { TrainingConfig } from '../keras_format/training_config';
import { LossOrMetricFn, NamedTensor } from '../types';
import { Container, ContainerArgs } from './container';
import { Dataset } from './dataset_stub';
import { DisposeResult } from './topology';
import { ModelEvaluateDatasetArgs, ModelFitDatasetArgs } from './training_dataset';
import { ModelFitArgs } from './training_tensors';
import { ClassWeight, ClassWeightMap } from './training_utils';
/**
* Helper function for polymorphic input data: 1. singleton Tensor.
*/
export declare function isDataTensor(x: Tensor | Tensor[] | {
[inputName: string]: Tensor;
} | {
[inputName: string]: Tensor[];
}): boolean;
/**
* Helper function for polymorphic input data: 2. Array of Tensor.
*/
export declare function isDataArray(x: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}): boolean;
/**
* Helper function for polymorphic input data: 3. "dict" of Tensor.
*/
export declare function isDataDict(x: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}): boolean;
/**
* Normalizes inputs and targets provided by users.
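*
* For instance, a single `tf.Tensor` passed for a model with one input is
* returned wrapped in a one-element Array (an illustrative sketch; the input
* name 'dense_input' is hypothetical):
*
* ```js
* const standardized =
* standardizeInputData(tf.ones([4, 10]), ['dense_input']);
* console.log(standardized.length); // 1
* ```
*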
* @param data User-provided input data (polymorphic).
* @param names An Array of expected Tensor names.
* @param shapes Optional Array of expected Tensor shapes.
* @param checkBatchAxis Whether to check that the batch axis of the arrays
* match the expected value found in `shapes`.
* @param exceptionPrefix String prefix used for exception formatting.
* @returns List of standardized input Tensors (one Tensor per model input).
* @throws ValueError: in case of improperly formatted user data.
*/
export declare function standardizeInputData(data: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}, names: string[], shapes?: Shape[], checkBatchAxis?: boolean, exceptionPrefix?: string): Tensor[];
/**
* User input validation for Tensors.
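*
* A sketch of the kind of mismatch this check is meant to reject (assuming
* the sample counts are read off the first, i.e. batch, dimension):
*
* ```js
* // Inputs carry 8 samples but targets carry 6, so this is expected to throw.
* checkArrayLengths([tf.ones([8, 10])], [tf.ones([6, 1])]);
* ```
*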
* @param inputs `Array` of `tf.Tensor`s for inputs.
* @param targets `Array` of `tf.Tensor`s for targets.
* @param weights Optional `Array` of `tf.Tensor`s for sample weights.
* @throws ValueError: in case of incorrectly formatted data.
*/
export declare function checkArrayLengths(inputs: Tensor[], targets: Tensor[], weights?: Tensor[]): void;
/**
* Maps metric functions to model outputs.
* @param metrics A shortcut string name, a metric function, or an `Array` or
* dict (`Object`) of metric functions.
* @param outputNames An `Array` of the names of model outputs.
* @returns An `Array` (one entry per model output) of `Array` of metric
* functions. For instance, if the model has 2 outputs, and for the first
* output we want to compute `binaryAccuracy` and `binaryCrossentropy`,
* and just `binaryAccuracy` for the second output, the `Array` would look
* like:
* `[[binaryAccuracy, binaryCrossentropy], [binaryAccuracy]]`
* @throws TypeError: incompatible metrics format.
*/
export declare function collectMetrics(metrics: string | LossOrMetricFn | Array<string | LossOrMetricFn> | {
[outputName: string]: string | LossOrMetricFn;
}, outputNames: string[]): Array<Array<string | LossOrMetricFn>>;
export interface ModelEvaluateArgs {
/**
* Batch size (Integer). If unspecified, it will default to 32.
*/
batchSize?: number;
/**
* Verbosity mode.
*/
verbose?: ModelLoggingVerbosity;
/**
* Tensor of weights to weight the contribution of different samples to the
* loss and metrics.
*/
sampleWeight?: Tensor;
/**
* integer: total number of steps (batches of samples)
* before declaring the evaluation round finished. Ignored with the default
* value of `undefined`.
*/
steps?: number;
}
/**
* Configuration for calls to `LayersModel.compile()`.
*/
export interface ModelCompileArgs {
/**
* An instance of `tf.train.Optimizer` or a string name for an Optimizer.
*/
optimizer: string | Optimizer;
/**
* Objective function(s) or name(s) of objective function(s).
* If the model has multiple outputs, you can use a different loss
* on each output by passing a dictionary or an Array of losses.
* The loss value that will be minimized by the model will then be the sum
* of all individual losses.
*/
loss: string | string[] | {
[outputName: string]: string;
} | LossOrMetricFn | LossOrMetricFn[] | {
[outputName: string]: LossOrMetricFn;
};
/**
* List of metrics to be evaluated by the model during training and testing.
* Typically you will use `metrics=['accuracy']`.
* To specify different metrics for different outputs of a multi-output
* model, you could also pass a dictionary.
*/
metrics?: string | LossOrMetricFn | Array<string | LossOrMetricFn> | {
[outputName: string]: string | LossOrMetricFn;
};
}
/**
* A `tf.LayersModel` is a directed, acyclic graph of `tf.Layer`s plus methods
* for training, evaluation, prediction and saving.
*
* `tf.LayersModel` is the basic unit of training, inference and evaluation in
* TensorFlow.js. To create a `tf.LayersModel`, use `tf.model`.
*
* See also:
* `tf.Sequential`, `tf.loadLayersModel`.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
export declare class LayersModel extends Container implements tfc.InferenceModel {
/** @nocollapse */
static className: string;
protected optimizer_: Optimizer;
protected isOptimizerOwned: boolean;
loss: string | string[] | {
[outputName: string]: string;
} | LossOrMetricFn | LossOrMetricFn[] | {
[outputName: string]: LossOrMetricFn;
};
lossFunctions: LossOrMetricFn[];
private feedOutputShapes;
private feedLossFns;
private collectedTrainableWeights;
private testFunction;
history: History;
protected stopTraining_: boolean;
protected isTraining: boolean;
metrics: string | LossOrMetricFn | Array<string | LossOrMetricFn> | {
[outputName: string]: string | LossOrMetricFn;
};
metricsNames: string[];
metricsTensors: Array<[LossOrMetricFn, number]>;
private userDefinedMetadata;
constructor(args: ContainerArgs);
/**
* Print a text summary of the model's layers.
*
* The summary includes
* - Name and type of all layers that comprise the model.
* - Output shape(s) of the layers
* - Number of weight parameters of each layer
* - If the model has non-sequential-like topology, the inputs each layer
* receives
* - The total number of trainable and non-trainable parameters of the model.
*
* ```js
* const input1 = tf.input({shape: [10]});
* const input2 = tf.input({shape: [20]});
* const dense1 = tf.layers.dense({units: 4}).apply(input1);
* const dense2 = tf.layers.dense({units: 8}).apply(input2);
* const concat = tf.layers.concatenate().apply([dense1, dense2]);
* const output =
* tf.layers.dense({units: 3, activation: 'softmax'}).apply(concat);
*
* const model = tf.model({inputs: [input1, input2], outputs: output});
* model.summary();
* ```
*
* @param lineLength Custom line length, in number of characters.
* @param positions Custom widths of each of the columns, as either
* fractions of `lineLength` (e.g., `[0.5, 0.75, 1]`) or absolute number
* of characters (e.g., `[30, 50, 65]`). Each number corresponds to
* right-most (i.e., ending) position of a column.
* @param printFn Custom print function. Can be used to replace the default
* `console.log`. For example, you can use `x => {}` to mute the printed
* messages in the console.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
summary(lineLength?: number, positions?: number[], printFn?: (message?: any, ...optionalParams: any[]) => void): void;
/**
* Configures and prepares the model for training and evaluation. Compiling
* outfits the model with an optimizer, loss, and/or metrics. Calling `fit`
* or `evaluate` on an un-compiled model will throw an error.
*
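* For instance, a minimal sketch of compiling a single-output model with a
* string optimizer name, a loss identifier and a metric identifier:
*
* ```js
* const model = tf.sequential({
* layers: [tf.layers.dense({units: 1, inputShape: [10], activation: 'sigmoid'})]
* });
* model.compile({
* optimizer: 'sgd',
* loss: 'binaryCrossentropy',
* metrics: ['accuracy']
* });
* ```
*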
* @param args a `ModelCompileArgs` specifying the loss, optimizer, and
* metrics to be used for fitting and evaluating this model.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
compile(args: ModelCompileArgs): void;
/**
* Check trainable weights count consistency.
*
* This will raise a warning if `this.trainableWeights` and
* `this.collectedTrainableWeights` are inconsistent (i.e., have different
* numbers of parameters).
* Inconsistency will typically arise when one modifies `model.trainable`
* without calling `model.compile()` again.
*/
protected checkTrainableWeightsConsistency(): void;
/**
* Returns the loss value & metrics values for the model in test mode.
*
* Loss and metrics are specified during `compile()`, which needs to happen
* before calls to `evaluate()`.
*
* Computation is done in batches.
*
* ```js
* const model = tf.sequential({
* layers: [tf.layers.dense({units: 1, inputShape: [10]})]
* });
* model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
* const result = model.evaluate(
* tf.ones([8, 10]), tf.ones([8, 1]), {batchSize: 4});
* result.print();
* ```
*
* @param x `tf.Tensor` of test data, or an `Array` of `tf.Tensor`s if the
* model has multiple inputs.
* @param y `tf.Tensor` of target data, or an `Array` of `tf.Tensor`s if the
* model has multiple outputs.
* @param args A `ModelEvaluateArgs`, containing optional fields.
*
* @return `Scalar` test loss (if the model has a single output and no
* metrics) or `Array` of `Scalar`s (if the model has multiple outputs
* and/or metrics). The attribute `model.metricsNames`
* will give you the display labels for the scalar outputs.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
evaluate(x: Tensor | Tensor[], y: Tensor | Tensor[], args?: ModelEvaluateArgs): Scalar | Scalar[];
/**
* Evaluate model using a dataset object.
*
* Note: Unlike `evaluate()`, this method is asynchronous (`async`).
*
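* A minimal sketch, assuming the `tf.data` API (`tf.data.array`,
* `tf.data.zip`) is available for building the dataset of `[xs, ys]` batches:
*
* ```js
* const model = tf.sequential({
* layers: [tf.layers.dense({units: 1, inputShape: [4]})]
* });
* model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
*
* const xDataset = tf.data.array([[1, 1, 1, 1], [0, 0, 0, 0]]);
* const yDataset = tf.data.array([[1], [0]]);
* const dataset = tf.data.zip([xDataset, yDataset]).batch(2);
*
* const result = await model.evaluateDataset(dataset);
* result.print();
* ```
*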
* @param dataset A dataset object. Its `iterator()` method is expected
* to generate a dataset iterator object, the `next()` method of which
* is expected to produce data batches for evaluation. The return value
* of the `next()` call ought to contain a boolean `done` field and a
* `value` field. The `value` field is expected to be an array of two
* `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The former
* case is for models with exactly one input and one output (e.g.
* a sequential model). The latter case is for models with multiple
* inputs and/or multiple outputs. Of the two items in the array, the
* first is the input feature(s) and the second is the output target(s).
* @param args A configuration object for the dataset-based evaluation.
* @returns Loss and metric values as an Array of `Scalar` objects.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
evaluateDataset(dataset: Dataset<{}>, args?: ModelEvaluateDatasetArgs): Promise<Scalar | Scalar[]>;
/**
* Get number of samples provided for training, evaluation or prediction.
*
* @param ins Input `tf.Tensor`.
* @param batchSize Integer batch size, optional.
* @param steps Total number of steps (batches of samples) before
* declaring loop finished. Optional.
* @param stepsName The public API's parameter name for `steps`.
* @returns Number of samples provided.
*/
private checkNumSamples;
/**
* Execute internal tensors of the model with input data feed.
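*
* For example, intermediate activations can be fetched by the name of the
* corresponding `SymbolicTensor` (a sketch; the name is read off the
* `apply()` result rather than hard-coded):
*
* ```js
* const input = tf.input({shape: [12]});
* const hidden = tf.layers.dense({units: 10}).apply(input);
* const output = tf.layers.dense({units: 4}).apply(hidden);
* const model = tf.model({inputs: input, outputs: output});
*
* // Fetch the value of the hidden layer's output for a batch of inputs.
* const activation = model.execute(tf.ones([2, 12]), hidden.name);
* activation.print();
* ```
*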
* @param inputs Input data feed. Must match the inputs of the model.
* @param outputs Names of the output tensors to be fetched. Must match
* names of the SymbolicTensors that belong to the graph.
* @returns Fetched values for `outputs`.
*/
execute(inputs: Tensor | Tensor[] | NamedTensorMap, outputs: string | string[]): Tensor | Tensor[];
/**
* Retrieve the model's internal symbolic tensors from symbolic-tensor names.
*/
private retrieveSymbolicTensors;
/**
* Helper method to loop over some data in batches.
*
* Porting Note: Not using the functional approach in the Python equivalent
* due to the imperative backend.
* Porting Note: Does not support step mode currently.
*
* @param ins: input data
* @param batchSize: integer batch size.
* @param verbose: verbosity mode.
* @returns: Predictions as `tf.Tensor` (if a single output) or an `Array` of
* `tf.Tensor`s (if multiple outputs).
*/
private predictLoop;
/**
* Generates output predictions for the input samples.
*
* Computation is done in batches.
*
* Note: the "step" mode of predict() is currently not supported.
* This is because the TensorFlow.js core backend is imperative only.
*
* ```js
* const model = tf.sequential({
* layers: [tf.layers.dense({units: 1, inputShape: [10]})]
* });
* model.predict(tf.ones([8, 10]), {batchSize: 4}).print();
* ```
*
* @param x The input data, as a Tensor, or an `Array` of `tf.Tensor`s if
* the model has multiple inputs.
* @param args A `ModelPredictArgs` object containing optional fields.
*
* @return Prediction results as a `tf.Tensor` or an `Array` of `tf.Tensor`s.
*
* @exception ValueError In case of mismatch between the provided input data
* and the model's expectations, or in case a stateful model receives a
* number of samples that is not a multiple of the batch size.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
predict(x: Tensor | Tensor[], args?: ModelPredictArgs): Tensor | Tensor[];
/**
* Returns predictions for a single batch of samples.
*
* ```js
* const model = tf.sequential({
* layers: [tf.layers.dense({units: 1, inputShape: [10]})]
* });
* model.predictOnBatch(tf.ones([8, 10])).print();
* ```
* @param x: Input samples, as a Tensor (for models with exactly one
* input) or an array of Tensors (for models with more than one input).
* @return Tensor(s) of predictions
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
predictOnBatch(x: Tensor | Tensor[]): Tensor | Tensor[];
protected standardizeUserDataXY(x: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}, y: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}, checkBatchAxis?: boolean, batchSize?: number): [Tensor[], Tensor[]];
protected standardizeUserData(x: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}, y: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}, sampleWeight?: Tensor | Tensor[] | {
[outputName: string]: Tensor;
}, classWeight?: ClassWeight | ClassWeight[] | ClassWeightMap, checkBatchAxis?: boolean, batchSize?: number): Promise<[Tensor[], Tensor[], Tensor[]]>;
/**
* Loop over some test data in batches.
* @param f A Function returning a list of tensors.
* @param ins Array of tensors to be fed to `f`.
* @param batchSize Integer batch size or `null` / `undefined`.
* @param verbose verbosity mode.
* @param steps Total number of steps (batches of samples) before
* declaring test finished. Ignored with the default value of `null` /
* `undefined`.
* @returns Array of Scalars.
*/
private testLoop;
protected getDedupedMetricsNames(): string[];
/**
* Creates a function that performs the following actions:
*
* 1. computes the losses
* 2. sums them to get the total loss
* 3. calls the optimizer to compute the gradients of the LayersModel's
* trainable weights w.r.t. the total loss and updates the variables
* 4. calculates the metrics
* 5. returns the values of the losses and metrics.
*/
protected makeTrainFunction(): (data: Tensor[]) => Scalar[];
/**
* Create a function which, when invoked with an array of `tf.Tensor`s as a
* batch of inputs, returns the prespecified loss and metrics of the model
* under the batch of input data.
*/
private makeTestFunction;
/**
* Trains the model for a fixed number of epochs (iterations on a
* dataset).
*
* ```js
* const model = tf.sequential({
* layers: [tf.layers.dense({units: 1, inputShape: [10]})]
* });
* model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
* for (let i = 1; i < 5 ; ++i) {
* const h = await model.fit(tf.ones([8, 10]), tf.ones([8, 1]), {
* batchSize: 4,
* epochs: 3
* });
* console.log("Loss after Epoch " + i + " : " + h.history.loss[0]);
* }
* ```
*
* @param x `tf.Tensor` of training data, or an array of `tf.Tensor`s if the
* model has multiple inputs. If all inputs in the model are named, you
* can also pass a dictionary mapping input names to `tf.Tensor`s.
* @param y `tf.Tensor` of target (label) data, or an array of `tf.Tensor`s if
* the model has multiple outputs. If all outputs in the model are named,
* you can also pass a dictionary mapping output names to `tf.Tensor`s.
* @param args A `ModelFitArgs`, containing optional fields.
*
* @return A `History` instance. Its `history` attribute contains all
* information collected during training.
*
* @exception ValueError In case of mismatch between the provided input
* data and what the model expects.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
fit(x: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}, y: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}, args?: ModelFitArgs): Promise<History>;
/**
* Abstract fit function for `f(ins)`.
* @param f A Function returning a list of tensors. For training, this
* function is expected to perform the updates to the variables.
* @param ins List of tensors to be fed to `f`.
* @param outLabels List of strings, display names of the outputs of `f`.
* @param batchSize Integer batch size or `== null` if unknown. Default : 32.
* @param epochs Number of times to iterate over the data. Default : 1.
* @param verbose Verbosity mode: 0, 1, or 2. Default: 1.
* @param callbacks List of callbacks to be called during training.
* @param valF Function to call for validation.
* @param valIns List of tensors to be fed to `valF`.
* @param shuffle Whether to shuffle the data at the beginning of every
* epoch. Default : true.
* @param callbackMetrics List of strings, the display names of the metrics
* passed to the callbacks. They should be the concatenation of the
* display names of the outputs of `f` and the list of display names
* of the outputs of `valF`.
* @param initialEpoch Epoch at which to start training (useful for
* resuming a previous training run). Default : 0.
* @param stepsPerEpoch Total number of steps (batches of samples) before
* declaring one epoch finished and starting the next epoch. Ignored with
* the default value of `undefined` or `null`.
* @param validationSteps Number of steps to run validation for (only if
* doing validation from data tensors). Not applicable for tfjs-layers.
* @returns A `History` object.
*/
fitLoop(f: (data: Tensor[]) => Scalar[], ins: Tensor[], outLabels?: string[], batchSize?: number, epochs?: number, verbose?: number, callbacks?: BaseCallback[], valF?: (data: Tensor[]) => Scalar[], valIns?: Tensor[], shuffle?: boolean | string, callbackMetrics?: string[], initialEpoch?: number, stepsPerEpoch?: number, validationSteps?: number): Promise<History>;
/**
* Trains the model using a dataset object.
*
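* A minimal sketch, assuming the `tf.data` API (`tf.data.array`,
* `tf.data.zip`) is used to build a dataset of `[xs, ys]` batches:
*
* ```js
* const xDataset = tf.data.array([[0, 0], [0, 1], [1, 0], [1, 1]]);
* const yDataset = tf.data.array([[0], [1], [1], [0]]);
* const xyDataset = tf.data.zip([xDataset, yDataset]).batch(4).shuffle(4);
*
* const model = tf.sequential({
* layers: [tf.layers.dense({units: 1, inputShape: [2], activation: 'sigmoid'})]
* });
* model.compile({optimizer: 'sgd', loss: 'binaryCrossentropy'});
*
* const history = await model.fitDataset(xyDataset, {epochs: 5});
* console.log(history.history.loss[0]);
* ```
*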
* @param dataset A dataset object. Its `iterator()` method is expected
* to generate a dataset iterator object, the `next()` method of which
* is expected to produce data batches for training. The return value
* of the `next()` call ought to contain a boolean `done` field and a
* `value` field. The `value` field is expected to be an array of two
* `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The former
* case is for models with exactly one input and one output (e.g.
* a sequential model). The latter case is for models with multiple
* inputs and/or multiple outputs.
* Of the two items in the array, the first is the input feature(s) and
* the second is the output target(s).
* @param args A `ModelFitDatasetArgs`, containing optional fields.
*
* @return A `History` instance. Its `history` attribute contains all
* information collected during training.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
fitDataset<T>(dataset: Dataset<T>, args: ModelFitDatasetArgs<T>): Promise<History>;
/**
* Runs a single gradient update on a single batch of data.
*
* This method differs from `fit()` and `fitDataset()` in the following
* regards:
* - It operates on exactly one batch of data.
* - It returns only the loss and metric values, instead of
* returning the batch-by-batch loss and metric values.
* - It doesn't support fine-grained options such as verbosity and
* callbacks.
*
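* A minimal sketch of a manual training loop built on this method:
*
* ```js
* const model = tf.sequential({
* layers: [tf.layers.dense({units: 1, inputShape: [10]})]
* });
* model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
*
* const xs = tf.ones([8, 10]);
* const ys = tf.zeros([8, 1]);
* for (let i = 0; i < 5; ++i) {
* // Each call performs exactly one gradient update on this batch.
* const loss = await model.trainOnBatch(xs, ys);
* console.log('Loss after update ' + i + ': ' + loss);
* }
* ```
*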
* @param x Input data. It could be one of the following:
* - A `tf.Tensor`, or an Array of `tf.Tensor`s (in case the model has
* multiple inputs).
* - An Object mapping input names to corresponding `tf.Tensor` (if the
* model has named inputs).
* @param y Target data. It could be either a `tf.Tensor` or multiple
* `tf.Tensor`s. It should be consistent with `x`.
* @returns Training loss or losses (in case the model has
* multiple outputs), along with metrics (if any), as numbers.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
trainOnBatch(x: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}, y: Tensor | Tensor[] | {
[inputName: string]: Tensor;
}): Promise<number | number[]>;
/**
* Extract weight values of the model.
*
* @param config: An instance of `io.SaveConfig`, which specifies
* model-saving options such as whether only trainable weights are to be
* saved.
* @returns A `NamedTensorMap` mapping original weight names (i.e.,
* non-uniqueified weight names) to their values.
*/
protected getNamedWeights(config?: io.SaveConfig): NamedTensor[];
/**
* Setter used for force stopping of LayersModel.fit() (i.e., training).
*
* Example:
*
* ```js
* const input = tf.input({shape: [10]});
* const output = tf.layers.dense({units: 1}).apply(input);
* const model = tf.model({inputs: [input], outputs: [output]});
* model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});
* const xs = tf.ones([8, 10]);
* const ys = tf.zeros([8, 1]);
*
* const history = await model.fit(xs, ys, {
* epochs: 10,
* callbacks: {
* onEpochEnd: async (epoch, logs) => {
* if (epoch === 2) {
* model.stopTraining = true;
* }
* }
* }
* });
*
* // There should be only 3 values in the loss array, instead of 10 values,
* // due to the stopping after 3 epochs.
* console.log(history.history.loss);
* ```
*/
set stopTraining(stop: boolean);
get stopTraining(): boolean;
get optimizer(): Optimizer;
set optimizer(optimizer: Optimizer);
dispose(): DisposeResult;
private getLossIdentifiers;
private getMetricIdentifiers;
protected getTrainingConfig(): TrainingConfig;
loadTrainingConfig(trainingConfig: TrainingConfig): void;
/**
* Save the configuration and/or weights of the LayersModel.
*
* An `IOHandler` is an object that has a `save` method of the proper
* signature defined. The `save` method manages the storing or
* transmission of serialized data ("artifacts") that represent the
* model's topology and weights onto or via a specific medium, such as
* file downloads, local storage, IndexedDB in the web browser and HTTP
* requests to a server. TensorFlow.js provides `IOHandler`
* implementations for a number of frequently used saving mediums, such as
* `tf.io.browserDownloads` and `tf.io.browserLocalStorage`. See `tf.io`
* for more details.
*
* This method also allows you to refer to certain types of `IOHandler`s
* as URL-like string shortcuts, such as 'localstorage://' and
* 'indexeddb://'.
*
* Example 1: Save `model`'s topology and weights to browser [local
* storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage);
* then load it back.
*
* ```js
* const model = tf.sequential(
* {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});
* console.log('Prediction from original model:');
* model.predict(tf.ones([1, 3])).print();
*
* const saveResults = await model.save('localstorage://my-model-1');
*
* const loadedModel = await tf.loadLayersModel('localstorage://my-model-1');
* console.log('Prediction from loaded model:');
* loadedModel.predict(tf.ones([1, 3])).print();
* ```
*
* Example 2. Saving `model`'s topology and weights to browser
* [IndexedDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API);
* then load it back.
*
* ```js
* const model = tf.sequential(
* {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});
* console.log('Prediction from original model:');
* model.predict(tf.ones([1, 3])).print();
*
* const saveResults = await model.save('indexeddb://my-model-1');
*
* const loadedModel = await tf.loadLayersModel('indexeddb://my-model-1');
* console.log('Prediction from loaded model:');
* loadedModel.predict(tf.ones([1, 3])).print();
* ```
*
* Example 3. Saving `model`'s topology and weights as two files
* (`my-model-1.json` and `my-model-1.weights.bin`) downloaded from
* browser.
*
* ```js
* const model = tf.sequential(
* {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});
* const saveResults = await model.save('downloads://my-model-1');
* ```
*
* Example 4. Send `model`'s topology and weights to an HTTP server.
* See the documentation of `tf.io.http` for more details
* including specifying request parameters and implementation of the
* server.
*
* ```js
* const model = tf.sequential(
* {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});
* const saveResults = await model.save('http://my-server/model/upload');
* ```
*
* @param handlerOrURL An instance of `IOHandler` or a URL-like,
* scheme-based string shortcut for `IOHandler`.
* @param config Options for saving the model.
* @returns A `Promise` of `SaveResult`, which summarizes the result of
* the saving, such as byte sizes of the saved artifacts for the model's
* topology and weight values.
*
* @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
*/
save(handlerOrURL: io.IOHandler | string, config?: io.SaveConfig): Promise<io.SaveResult>;
/**
* Set user-defined metadata.
*
* The set metadata will be serialized together with the topology
* and weights of the model during `save()` calls.
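*
* A small illustrative sketch (the metadata keys shown are arbitrary):
*
* ```js
* model.setUserDefinedMetadata({language: 'en', revision: 2});
* console.log(model.getUserDefinedMetadata()); // {language: 'en', revision: 2}
* ```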
*
* @param userDefinedMetadata The metadata object to be set.
*/
setUserDefinedMetadata(userDefinedMetadata: {}): void;
/**
* Get user-defined metadata.
*
* The metadata is supplied via one of the two routes:
* 1. By calling `setUserDefinedMetadata()`.
* 2. Loaded during model loading (if the model is constructed
* via `tf.loadLayersModel()`.)
*
* If no user-defined metadata is available from either of the
* two routes, this function will return `undefined`.
*/
getUserDefinedMetadata(): {};
}
/**
* A `tf.Functional` is an alias to `tf.LayersModel`.
*
* See also:
* `tf.LayersModel`, `tf.Sequential`, `tf.loadLayersModel`.
*/
/** @doc {heading: 'Models', subheading: 'Classes'} */
export declare class Functional extends LayersModel {
static className: string;
}