/**
 * @license
 * Copyright 2018 Google LLC
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 * =============================================================================
 */
/// <amd-module name="@tensorflow/tfjs-layers/dist/models" />
import { io, Optimizer, Scalar, serialization, Tensor } from '@tensorflow/tfjs-core';
import { History } from './base_callbacks';
import { Dataset } from './engine/dataset_stub';
import { Layer } from './engine/topology';
import { LayersModel, ModelCompileArgs, ModelEvaluateArgs } from './engine/training';
import { ModelEvaluateDatasetArgs, ModelFitDatasetArgs } from './engine/training_dataset';
import { ModelFitArgs } from './engine/training_tensors';
import { Shape } from './keras_format/common';
import { PyJsonDict } from './keras_format/types';
import { Kwargs } from './types';
/**
 * Parses a JSON model configuration file and returns a model instance.
 *
 * ```js
 * // This example shows how to serialize a model using `toJSON()` and
 * // deserialize it as another model using `tf.models.modelFromJSON()`.
 * // Note: this example serializes and deserializes only the topology
 * // of the model; the weights of the loaded model will be different
 * // from those of the original model, due to random weight
 * // initialization.
 * // To load the topology and weights of a model, use `tf.loadLayersModel()`.
 * const model1 = tf.sequential();
 * model1.add(tf.layers.repeatVector({inputShape: [2], n: 4}));
 * // Serialize `model1` as a JSON object.
 * const model1JSON = model1.toJSON(null, false);
 * model1.summary();
 *
 * const model2 = await tf.models.modelFromJSON(model1JSON);
 * model2.summary();
 * ```
 *
 * @param modelAndWeightsConfig JSON object or string encoding a model and
 *     weights configuration. It can also be only the topology JSON of the
 *     model, in which case the weights will not be loaded.
 * @param customObjects Optional dictionary mapping names
 *     (strings) to custom classes or functions to be
 *     considered during deserialization.
 * @returns A TensorFlow.js Layers `tf.LayersModel` instance (uncompiled).
 */
export declare function modelFromJSON(modelAndWeightsConfig: ModelAndWeightsConfig | PyJsonDict, customObjects?: serialization.ConfigDict): Promise<LayersModel>;
/**
 * Options for loading a saved model in TensorFlow.js format.
 */
export interface ModelAndWeightsConfig {
    /**
     * A JSON object or JSON string containing the model config.
     *
     * This can be either of the following two formats:
     *   - A model architecture-only config, i.e., a format consistent with the
     *     return value of `keras.Model.to_json()`.
     *   - A full model config, containing not only the model architecture, but
     *     also training options and state, i.e., a format consistent with the
     *     return value of `keras.models.save_model()`.
     */
    modelTopology: PyJsonDict;
    /**
     * A weights manifest in TensorFlow.js format.
     */
    weightsManifest?: io.WeightsManifestConfig;
    /**
     * Path to prepend to the paths in `weightsManifest` before fetching.
     *
     * The path may optionally end in a slash ('/').
     */
    pathPrefix?: string;
}
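/*
 * For illustration, a minimal sketch (not part of the original file) of a
 * topology-only `ModelAndWeightsConfig` passed to `tf.models.modelFromJSON`;
 * `weightsManifest` and `pathPrefix` are omitted, so no weights are loaded and
 * the restored model gets freshly initialized weights:
 *
 * ```js
 * const m = tf.sequential();
 * m.add(tf.layers.dense({units: 2, inputShape: [4]}));
 * const config = {modelTopology: m.toJSON(null, false)};
 * const restored = await tf.models.modelFromJSON(config);
 * restored.summary();
 * ```
 */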
export interface ModelPredictArgs {
    /**
     * Optional. Batch size (Integer). If unspecified, it will default to 32.
     */
    batchSize?: number;
    /**
     * Optional. Verbosity mode. Defaults to false.
     */
    verbose?: boolean;
}
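/*
 * A minimal sketch (not part of the original file) of passing
 * `ModelPredictArgs` to `predict()`; the model and shapes are illustrative
 * only:
 *
 * ```js
 * const model = tf.sequential(
 *     {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});
 * // Run prediction over 64 samples in batches of 16.
 * model.predict(tf.ones([64, 3]), {batchSize: 16, verbose: false}).print();
 * ```
 */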
/**
 * Load a model, including its topology and optionally weights. See the
 * Tutorial named "How to import a Keras Model" for usage examples.
 *
 * Example 1: Save `model`'s topology and weights to browser [local
 * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage);
 * then load it back.
 *
 * ```js
 * const model = tf.sequential(
 *     {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});
 * console.log('Prediction from original model:');
 * model.predict(tf.ones([1, 3])).print();
 *
 * const saveResults = await model.save('localstorage://my-model-1');
 *
 * const loadedModel = await tf.loadLayersModel('localstorage://my-model-1');
 * console.log('Prediction from loaded model:');
 * loadedModel.predict(tf.ones([1, 3])).print();
 * ```
 *
 * Example 2: Save `model`'s topology and weights to browser
 * [IndexedDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API);
 * then load it back.
 *
 * ```js
 * const model = tf.sequential(
 *     {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});
 * console.log('Prediction from original model:');
 * model.predict(tf.ones([1, 3])).print();
 *
 * const saveResults = await model.save('indexeddb://my-model-1');
 *
 * const loadedModel = await tf.loadLayersModel('indexeddb://my-model-1');
 * console.log('Prediction from loaded model:');
 * loadedModel.predict(tf.ones([1, 3])).print();
 * ```
 *
 * Example 3: Load a model from user-selected files from HTML
 * [file input
 * elements](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file).
 *
 * ```js
 * // Note: this code snippet will not work without the HTML elements in the
 * // page.
 * const jsonUpload = document.getElementById('json-upload');
 * const weightsUpload = document.getElementById('weights-upload');
 *
 * const model = await tf.loadLayersModel(
 *     tf.io.browserFiles([jsonUpload.files[0], weightsUpload.files[0]]));
 * ```
 *
 * Example 4: Load a model from an HTTP server.
 *
 * ```js
 * const model = await
 *     tf.loadLayersModel('https://storage.googleapis.com/tfjs-models/tfjs/iris_v1/model.json');
 * model.summary();
 * ```
 *
 * @param pathOrIOHandler Can be either of the following two formats:
 *   1. A string path to the `ModelAndWeightsConfig` JSON describing
 *      the model in the canonical TensorFlow.js format. This path will be
 *      interpreted as a relative HTTP path, to which `fetch` will be used to
 *      request the model topology and weight manifest JSON.
 *      The content of the JSON file is assumed to be a JSON object with the
 *      following fields and values:
 *      - 'modelTopology': A JSON object that can be either of:
 *        1. a model architecture JSON consistent with the format of the return
 *           value of `keras.Model.to_json()`
 *        2. a full model JSON in the format of `keras.models.save_model()`.
 *      - 'weightsManifest': A TensorFlow.js weights manifest.
 *      See the Python converter function `save_model()` for more details.
 *      It is also assumed that model weights can be accessed from relative
 *      paths described by the `paths` fields in the weights manifest.
 *   2. A `tf.io.IOHandler` object that loads model artifacts with its `load`
 *      method.
 * @param options Optional configuration arguments for the model loading,
 *     including:
 *   - `strict`: Require that the provided weights exactly match those required
 *     by the layers. Default true. Passing false means that both extra
 *     weights and missing weights will be silently ignored.
 *   - `onProgress`: A progress callback of the form:
 *     `(fraction: number) => void`. This callback can be used to monitor the
 *     model-loading process.
 * @returns A `Promise` of `tf.LayersModel`, with the topology and weights
 *     loaded.
 */
export declare function loadLayersModelInternal(pathOrIOHandler: string | io.IOHandler, options?: io.LoadOptions): Promise<LayersModel>;
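/*
 * For illustration, a sketch (not part of the original file) of the `options`
 * argument described above, using the public `tf.loadLayersModel` wrapper and
 * the same hosted iris model URL as Example 4:
 *
 * ```js
 * const model = await tf.loadLayersModel(
 *     'https://storage.googleapis.com/tfjs-models/tfjs/iris_v1/model.json', {
 *       // Report download progress as a fraction in [0, 1].
 *       onProgress: (fraction) => console.log(`Loading: ${fraction * 100}%`),
 *       // Keep the default behavior of requiring an exact weight match.
 *       strict: true,
 *     });
 * model.summary();
 * ```
 */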
/**
 * Load a model and optionally its weights, using an IOHandler object.
 *
 * @param handler The instance of `IOHandler` to be used during the model
 *     loading.
 * @param customObjects Any optional custom objects to be used during model
 *     loading.
 * @param options Optional configuration for the model loading, e.g., `strict`,
 *     which controls whether weight loading is done in strict mode.
 *     Default: `true`.
 */
export declare function loadLayersModelFromIOHandler(handler: io.IOHandler, customObjects?: serialization.ConfigDict, options?: io.LoadOptions): Promise<LayersModel>;
/**
 * Configuration for a Sequential model.
 */
export interface SequentialArgs {
    /** Stack of layers for the model. */
    layers?: Layer[];
    /** The name of this model. */
    name?: string;
}
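/*
 * A minimal sketch (not part of the original file) of passing `SequentialArgs`
 * to the `tf.sequential` factory; both fields are optional:
 *
 * ```js
 * const model = tf.sequential({
 *   name: 'two-layer-mlp',
 *   layers: [
 *     tf.layers.dense({units: 8, inputShape: [4], activation: 'relu'}),
 *     tf.layers.dense({units: 1})
 *   ]
 * });
 * model.summary();
 * ```
 */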
/**
 * A model with a stack of layers, feeding linearly from one to the next.
 *
 * `tf.sequential` is a factory function that creates an instance of
 * `tf.Sequential`.
 *
 * ```js
 * // Define a model for linear regression.
 * const model = tf.sequential();
 * model.add(tf.layers.dense({units: 1, inputShape: [1]}));
 *
 * // Prepare the model for training: Specify the loss and the optimizer.
 * model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});
 *
 * // Generate some synthetic data for training.
 * const xs = tf.tensor2d([1, 2, 3, 4], [4, 1]);
 * const ys = tf.tensor2d([1, 3, 5, 7], [4, 1]);
 *
 * // Train the model using the data then do inference on a data point the
 * // model hasn't seen:
 * await model.fit(xs, ys);
 * model.predict(tf.tensor2d([5], [1, 1])).print();
 * ```
 *
 * @doc {heading: 'Models', subheading: 'Classes'}
 */
export declare class Sequential extends LayersModel {
    /** @nocollapse */
    static className: string;
    private model;
    constructor(args?: SequentialArgs);
    private checkShape;
    /**
     * Adds a layer instance on top of the layer stack.
     *
     * ```js
     * const model = tf.sequential();
     * model.add(tf.layers.dense({units: 8, inputShape: [1]}));
     * model.add(tf.layers.dense({units: 4, activation: 'relu6'}));
     * model.add(tf.layers.dense({units: 1, activation: 'relu6'}));
     * // Note that the untrained model is random at this point.
     * model.predict(tf.randomNormal([10, 1])).print();
     * ```
     * @param layer Layer instance.
     *
     * @exception ValueError In case the `layer` argument does not know its
     *     input shape.
     * @exception ValueError In case the `layer` argument has multiple output
     *     tensors, or is already connected somewhere else (forbidden in
     *     `Sequential` models).
     *
     * @doc {heading: 'Models', subheading: 'Classes'}
     */
    add(layer: Layer): void;
    /**
     * Removes the last layer in the model.
     *
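     * For illustration, a minimal sketch (not in the original doc) of removing
     * the top layer:
     *
     * ```js
     * const model = tf.sequential();
     * model.add(tf.layers.dense({units: 8, inputShape: [4]}));
     * model.add(tf.layers.dense({units: 1}));
     * model.pop();  // Removes the final dense layer.
     * console.log(model.layers.length);  // 1
     * ```
     *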
     * @exception TypeError if there are no layers in the model.
     */
    pop(): void;
    call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
    build(inputShape?: Shape | Shape[]): void;
    countParams(): number;
    /**
     * Print a text summary of the Sequential model's layers.
     *
     * The summary includes:
     * - Name and type of all layers that comprise the model.
     * - Output shape(s) of the layers.
     * - Number of weight parameters of each layer.
     * - The total number of trainable and non-trainable parameters of the
     *   model.
     *
     * ```js
     * const model = tf.sequential();
     * model.add(
     *     tf.layers.dense({units: 100, inputShape: [10], activation: 'relu'}));
     * model.add(tf.layers.dense({units: 1, activation: 'sigmoid'}));
     *
     * model.summary();
     * ```
     *
     * @param lineLength Custom line length, in number of characters.
     * @param positions Custom widths of each of the columns, as either
     *     fractions of `lineLength` (e.g., `[0.5, 0.75, 1]`) or absolute number
     *     of characters (e.g., `[30, 50, 65]`). Each number corresponds to the
     *     right-most (i.e., ending) position of a column.
     * @param printFn Custom print function. Can be used to replace the default
     *     `console.log`. For example, you can use `x => {}` to mute the printed
     *     messages in the console.
     *
     * @doc {heading: 'Models', subheading: 'Classes'}
     */
    summary(lineLength?: number, positions?: number[], printFn?: (message?: any, ...optionalParams: any[]) => void): void;
    /**
     * Sets the weights of the model.
     *
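     * For illustration, a minimal sketch (not in the original doc) of copying
     * weights from one model into another of identical architecture:
     *
     * ```js
     * const makeModel = () => tf.sequential(
     *     {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});
     * const source = makeModel();
     * const target = makeModel();
     * // `getWeights()` returns tensors in the order `setWeights()` expects.
     * target.setWeights(source.getWeights());
     * ```
     *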
     * @param weights Should be a list of Tensors with shapes and types matching
     *     the output of `model.getWeights()`.
     */
    setWeights(weights: Tensor[]): void;
    /**
     * Returns the loss value & metrics values for the model in test mode.
     *
     * Loss and metrics are specified during `compile()`, which needs to happen
     * before calls to `evaluate()`.
     *
     * Computation is done in batches.
     *
     * ```js
     * const model = tf.sequential({
     *   layers: [tf.layers.dense({units: 1, inputShape: [10]})]
     * });
     * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
     * const result = model.evaluate(tf.ones([8, 10]), tf.ones([8, 1]), {
     *   batchSize: 4,
     * });
     * result.print();
     * ```
     *
     * @param x `tf.Tensor` of test data, or an `Array` of `tf.Tensor`s if the
     *     model has multiple inputs.
     * @param y `tf.Tensor` of target data, or an `Array` of `tf.Tensor`s if the
     *     model has multiple outputs.
     * @param args A `ModelEvaluateArgs`, containing optional fields.
     *
     * @return `Scalar` test loss (if the model has a single output and no
     *     metrics) or `Array` of `Scalar`s (if the model has multiple outputs
     *     and/or metrics). The attribute `model.metricsNames`
     *     will give you the display labels for the scalar outputs.
     *
     * @doc {heading: 'Models', subheading: 'Classes'}
     */
    evaluate(x: Tensor | Tensor[], y: Tensor | Tensor[], args?: ModelEvaluateArgs): Scalar | Scalar[];
    /**
     * Evaluate model using a dataset object.
     *
     * Note: Unlike `evaluate()`, this method is asynchronous (`async`).
     *
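     * For illustration, a minimal sketch (not in the original doc), reusing the
     * `tf.data.zip(...).batch(...)` pattern shown for `fitDataset()` below:
     *
     * ```js
     * const xs = tf.data.array([[1, 2], [3, 4], [5, 6], [7, 8]]);
     * const ys = tf.data.array([1, 2, 3, 4]);
     * const dataset = tf.data.zip({xs, ys}).batch(2);
     * const model = tf.sequential(
     *     {layers: [tf.layers.dense({units: 1, inputShape: [2]})]});
     * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
     * const result = await model.evaluateDataset(dataset, {});
     * result.print();
     * ```
     *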
     * @param dataset A dataset object. Its `iterator()` method is expected
     *     to generate a dataset iterator object, the `next()` method of which
     *     is expected to produce data batches for evaluation. The return value
     *     of the `next()` call ought to contain a boolean `done` field and a
     *     `value` field. The `value` field is expected to be an array of two
     *     `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The
     *     former case is for models with exactly one input and one output
     *     (e.g., a sequential model). The latter case is for models with
     *     multiple inputs and/or multiple outputs. Of the two items in the
     *     array, the first is the input feature(s) and the second is the output
     *     target(s).
     * @param args A configuration object for the dataset-based evaluation.
     * @returns Loss and metric values as an Array of `Scalar` objects.
     *
     * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
     */
    evaluateDataset(dataset: Dataset<{}>, args: ModelEvaluateDatasetArgs): Promise<Scalar | Scalar[]>;
    /**
     * Generates output predictions for the input samples.
     *
     * Computation is done in batches.
     *
     * Note: the "step" mode of predict() is currently not supported.
     * This is because the TensorFlow.js core backend is imperative only.
     *
     * ```js
     * const model = tf.sequential({
     *   layers: [tf.layers.dense({units: 1, inputShape: [10]})]
     * });
     * model.predict(tf.ones([2, 10])).print();
     * ```
     *
     * @param x The input data, as a Tensor, or an `Array` of `tf.Tensor`s if
     *     the model has multiple inputs.
     * @param args A `ModelPredictArgs` object containing optional fields.
     *
     * @return `tf.Tensor`(s) of predictions.
     *
     * @exception ValueError In case of mismatch between the provided input data
     *     and the model's expectations, or in case a stateful model receives a
     *     number of samples that is not a multiple of the batch size.
     *
     * @doc {heading: 'Models', subheading: 'Classes'}
     */
    predict(x: Tensor | Tensor[], args?: ModelPredictArgs): Tensor | Tensor[];
    /**
     * Returns predictions for a single batch of samples.
     *
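     * For illustration, a minimal sketch (not in the original doc):
     *
     * ```js
     * const model = tf.sequential(
     *     {layers: [tf.layers.dense({units: 1, inputShape: [10]})]});
     * // The whole input tensor is treated as one batch; no batching is applied.
     * model.predictOnBatch(tf.ones([4, 10])).print();
     * ```
     *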
     * @param x Input samples, as a Tensor, or list of Tensors (if the model
     *     has multiple inputs).
     * @return Tensor(s) of predictions.
     */
    predictOnBatch(x: Tensor): Tensor | Tensor[];
    /**
     * See `LayersModel.compile`.
     *
     * @param args
     */
    compile(args: ModelCompileArgs): void;
    optimizer: Optimizer;
    /**
     * Trains the model for a fixed number of epochs (iterations on a dataset).
     *
     * ```js
     * const model = tf.sequential({
     *   layers: [tf.layers.dense({units: 1, inputShape: [10]})]
     * });
     * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
     * const history = await model.fit(tf.ones([8, 10]), tf.ones([8, 1]), {
     *   batchSize: 4,
     *   epochs: 3
     * });
     * console.log(history.history.loss[0]);
     * ```
     *
     * @param x `tf.Tensor` of training data, or an array of `tf.Tensor`s if the
     *     model has multiple inputs. If all inputs in the model are named, you
     *     can also pass a dictionary mapping input names to `tf.Tensor`s.
     * @param y `tf.Tensor` of target (label) data, or an array of `tf.Tensor`s
     *     if the model has multiple outputs. If all outputs in the model are
     *     named, you can also pass a dictionary mapping output names to
     *     `tf.Tensor`s.
     * @param args A `ModelFitArgs`, containing optional fields.
     *
     * @return A `History` instance. Its `history` attribute contains all
     *     information collected during training.
     *
     * @exception ValueError In case of mismatch between the provided input data
     *     and what the model expects.
     *
     * @doc {heading: 'Models', subheading: 'Classes'}
     */
    fit(x: Tensor | Tensor[] | {
        [inputName: string]: Tensor;
    }, y: Tensor | Tensor[] | {
        [inputName: string]: Tensor;
    }, args?: ModelFitArgs): Promise<History>;
    /**
     * Trains the model using a dataset object.
     *
     * ```js
     * const xArray = [
     *   [1, 1, 1, 1, 1, 1, 1, 1, 1],
     *   [1, 1, 1, 1, 1, 1, 1, 1, 1],
     *   [1, 1, 1, 1, 1, 1, 1, 1, 1],
     *   [1, 1, 1, 1, 1, 1, 1, 1, 1],
     * ];
     * const yArray = [1, 1, 1, 1];
     * // Create a dataset from the JavaScript array.
     * const xDataset = tf.data.array(xArray);
     * const yDataset = tf.data.array(yArray);
     * // Zip combines the `x` and `y` Datasets into a single Dataset, the
     * // iterator of which will return an object containing two tensors,
     * // corresponding to `x` and `y`. The call to `batch(4)` will bundle
     * // four such samples into a single object, with the same keys now pointing
     * // to tensors that hold 4 examples, organized along the batch dimension.
     * // The call to `shuffle(4)` causes each iteration through the dataset to
     * // happen in a different order. The size of the shuffle window is 4.
     * const xyDataset = tf.data.zip({xs: xDataset, ys: yDataset})
     *     .batch(4)
     *     .shuffle(4);
     * const model = tf.sequential({
     *   layers: [tf.layers.dense({units: 1, inputShape: [9]})]
     * });
     * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
     * const history = await model.fitDataset(xyDataset, {
     *   epochs: 4,
     *   callbacks: {onEpochEnd: (epoch, logs) => console.log(logs.loss)}
     * });
     * ```
     *
     * @param dataset A dataset object. Its `iterator()` method is expected to
     *     generate a dataset iterator object, the `next()` method of which is
     *     expected to produce data batches for evaluation. The return value of
     *     the `next()` call ought to contain a boolean `done` field and a
     *     `value` field.
     *
     *     The `value` field is expected to be an object with fields
     *     `xs` and `ys`, which point to the feature tensor and the target
     *     tensor, respectively. This case is for models with exactly one input
     *     and one output (e.g., a sequential model). For example:
     *     ```js
     *     {value: {xs: xsTensor, ys: ysTensor}, done: false}
     *     ```
     *
     *     If the model has multiple inputs, the `xs` field of `value` should
     *     be an object mapping input names to their respective feature tensors.
     *     For example:
     *     ```js
     *     {
     *       value: {
     *         xs: {
     *           input_1: xsTensor1,
     *           input_2: xsTensor2
     *         },
     *         ys: ysTensor
     *       },
     *       done: false
     *     }
     *     ```
     *     If the model has multiple outputs, the `ys` field of `value` should
     *     be an object mapping output names to their respective target tensors.
     *     For example:
     *     ```js
     *     {
     *       value: {
     *         xs: xsTensor,
     *         ys: {
     *           output_1: ysTensor1,
     *           output_2: ysTensor2
     *         },
     *       },
     *       done: false
     *     }
     *     ```
     * @param args A `ModelFitDatasetArgs`, containing optional fields.
     *
     * @return A `History` instance. Its `history` attribute contains all
     *     information collected during training.
     *
     * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
     */
    fitDataset<T>(dataset: Dataset<T>, args: ModelFitDatasetArgs<T>): Promise<History>;
    /**
     * Runs a single gradient update on a single batch of data.
     *
     * This method differs from `fit()` and `fitDataset()` in the following
     * regards:
     *   - It operates on exactly one batch of data.
     *   - It returns only the loss and metric values, instead of
     *     returning the batch-by-batch loss and metric values.
     *   - It doesn't support fine-grained options such as verbosity and
     *     callbacks.
     *
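     * For illustration, a minimal sketch (not in the original doc) of a manual
     * training loop built on `trainOnBatch()`:
     *
     * ```js
     * const model = tf.sequential(
     *     {layers: [tf.layers.dense({units: 1, inputShape: [10]})]});
     * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
     * const xs = tf.ones([4, 10]);
     * const ys = tf.ones([4, 1]);
     * for (let step = 0; step < 5; ++step) {
     *   // Returns the scalar loss for this batch as a number.
     *   const loss = await model.trainOnBatch(xs, ys);
     *   console.log(`step ${step}: loss = ${loss}`);
     * }
     * ```
     *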
     * @param x Input data. It could be one of the following:
     *   - A `tf.Tensor`, or an Array of `tf.Tensor`s (in case the model has
     *     multiple inputs).
     *   - An Object mapping input names to corresponding `tf.Tensor` (if the
     *     model has named inputs).
     * @param y Target data. It could be either a `tf.Tensor` or multiple
     *     `tf.Tensor`s. It should be consistent with `x`.
     * @returns Training loss or losses (in case the model has
     *     multiple outputs), along with metrics (if any), as numbers.
     *
     * @doc {heading: 'Models', subheading: 'Classes'}
     */
    trainOnBatch(x: Tensor | Tensor[] | {
        [inputName: string]: Tensor;
    }, y: Tensor | Tensor[] | {
        [inputName: string]: Tensor;
    }): Promise<number | number[]>;
    /** @nocollapse */
    static fromConfig<T extends serialization.Serializable>(cls: serialization.SerializableConstructor<T>, config: serialization.ConfigDict, customObjects?: serialization.ConfigDict, fastWeightInit?: boolean): T;
    /**
     * Setter used for force-stopping of `LayersModel.fit()` (i.e., training).
     *
     * Example:
     *
     * ```js
     * const model = tf.sequential();
     * model.add(tf.layers.dense({units: 1, inputShape: [10]}));
     * model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});
     * const xs = tf.ones([8, 10]);
     * const ys = tf.zeros([8, 1]);
     *
     * const history = await model.fit(xs, ys, {
     *   epochs: 10,
     *   callbacks: {
     *     onEpochEnd: async (epoch, logs) => {
     *       if (epoch === 2) {
     *         model.stopTraining = true;
     *       }
     *     }
     *   }
     * });
     *
     * // There should be only 3 values in the loss array, instead of 10 values,
     * // due to the stopping after 3 epochs.
     * console.log(history.history.loss);
     * ```
     */
    stopTraining: boolean;
    getConfig(): any;
}