/**
 * @license
 * Copyright 2018 Google LLC
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 * =============================================================================
 */
/// <amd-module name="@tensorflow/tfjs-layers/dist/engine/topology" />
import { DataType, Scalar, serialization, Tensor } from '@tensorflow/tfjs-core';
import { Constraint } from '../constraints';
import { Initializer } from '../initializers';
import { Shape } from '../keras_format/common';
import { Regularizer } from '../regularizers';
import { Kwargs, RegularizerFn } from '../types';
import { LayerVariable } from '../variables';
export type Op = (x: LayerVariable) => LayerVariable;
/**
 * Constructor arguments for InputSpec.
 */
export interface InputSpecArgs {
    /** Expected datatype of the input. */
    dtype?: DataType;
    /** Expected shape of the input (may include null for unchecked axes). */
    shape?: Shape;
    /** Expected rank of the input. */
    ndim?: number;
    /** Maximum rank of the input. */
    maxNDim?: number;
    /** Minimum rank of the input. */
    minNDim?: number;
    /** Dictionary mapping integer axes to a specific dimension value. */
    axes?: {
        [axis: number]: number;
    };
}
/**
 * Specifies the ndim, dtype and shape of every input to a layer.
 *
 * Every layer should expose (if appropriate) an `inputSpec` attribute:
 * a list of instances of InputSpec (one per input tensor).
 *
 * A null entry in a shape is compatible with any dimension; a null shape is
 * compatible with any shape.
 */
export declare class InputSpec {
    /** Expected datatype of the input. */
    dtype?: DataType;
    /** Expected shape of the input (may include null for unchecked axes). */
    shape?: Shape;
    /** Expected rank of the input. */
    ndim?: number;
    /** Maximum rank of the input. */
    maxNDim?: number;
    /** Minimum rank of the input. */
    minNDim?: number;
    /** Dictionary mapping integer axes to a specific dimension value. */
    axes?: {
        [axis: number]: number;
    };
    constructor(args: InputSpecArgs);
}
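/*
 * Usage sketch (illustrative, not part of the declarations above): a custom
 * layer implementation may constrain its inputs via `inputSpec`. The
 * `FixedWidthLayer` class is hypothetical; `InputSpec` is assumed to be
 * imported from this module, and `tf` refers to the @tensorflow/tfjs union
 * package.
 *
 * ```js
 * class FixedWidthLayer extends tf.layers.Layer {
 *   constructor() {
 *     super({});
 *     // Require a single rank-2 input whose axis 1 has size 16.
 *     this.inputSpec = [new InputSpec({ndim: 2, axes: {1: 16}})];
 *   }
 * }
 * ```
 */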
/**
 * `tf.SymbolicTensor` is a placeholder for a Tensor without any concrete value.
 *
 * They are most often encountered when building a graph of `Layer`s for a
 * `tf.LayersModel`, when the input data's shape, but not its values, is known.
 *
 * @doc {heading: 'Models', 'subheading': 'Classes'}
 */
export declare class SymbolicTensor {
    readonly dtype: DataType;
    readonly shape: Shape;
    sourceLayer: Layer;
    readonly inputs: SymbolicTensor[];
    readonly callArgs: Kwargs;
    readonly outputTensorIndex?: number;
    readonly id: number;
    readonly name: string;
    readonly originalName?: string;
    /**
     * Rank/dimensionality of the tensor.
     */
    readonly rank: number;
    /**
     * Replacement for _keras_history.
     */
    nodeIndex: number;
    /**
     * Replacement for _keras_history.
     */
    tensorIndex: number;
    /**
     *
     * @param dtype
     * @param shape
     * @param sourceLayer The Layer that produced this symbolic tensor.
     * @param inputs The inputs passed to sourceLayer's __call__() method.
     * @param nodeIndex
     * @param tensorIndex
     * @param callArgs The keyword arguments passed to the __call__() method.
     * @param name
     * @param outputTensorIndex The index of this tensor in the list of outputs
     *   returned by apply().
     */
    constructor(dtype: DataType, shape: Shape, sourceLayer: Layer, inputs: SymbolicTensor[], callArgs: Kwargs, name?: string, outputTensorIndex?: number);
}
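/*
 * Usage sketch (illustrative): `SymbolicTensor`s are normally obtained from
 * `tf.input()` or by calling a layer's `apply()` on another `SymbolicTensor`,
 * rather than being constructed directly.
 *
 * ```js
 * const input = tf.input({shape: [4], dtype: 'float32'});
 * console.log(input.shape);  // [null, 4]; null is the unknown batch size.
 * console.log(input.rank);   // 2
 * console.log(input.dtype);  // 'float32'
 * ```
 */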
/**
 * Constructor arguments for Node.
 */
export interface NodeArgs {
    /**
     * The layer that takes `inputTensors` and turns them into `outputTensors`
     * (the node gets created when the `call` method of the layer is called).
     */
    outboundLayer: Layer;
    /**
     * A list of layers, the same length as `inputTensors`, the layers from where
     * `inputTensors` originate.
     */
    inboundLayers: Layer[];
    /**
     * A list of integers, the same length as `inboundLayers`. `nodeIndices[i]` is
     * the origin node of `inputTensors[i]` (necessary since each inbound layer
     * might have several nodes, e.g. if the layer is being shared with a
     * different data stream).
     */
    nodeIndices: number[];
    /**
     * A list of integers, the same length as `inboundLayers`. `tensorIndices[i]`
     * is the index of `inputTensors[i]` within the output of the inbound layer
     * (necessary since each inbound layer might have multiple tensor outputs,
     * with each one being independently manipulable).
     */
    tensorIndices: number[];
    /** List of input tensors. */
    inputTensors: SymbolicTensor[];
    /** List of output tensors. */
    outputTensors: SymbolicTensor[];
    /** List of input masks (a mask can be a tensor, or null). */
    inputMasks: Tensor[];
    /** List of output masks (a mask can be a tensor, or null). */
    outputMasks: Tensor[];
    /** List of input shape tuples. */
    inputShapes: Shape | Shape[];
    /** List of output shape tuples. */
    outputShapes: Shape | Shape[];
}
/**
 * The type of the return value of Layer.dispose() and Container.dispose().
 */
export interface DisposeResult {
    /**
     * Reference count after the dispose call.
     */
    refCountAfterDispose: number;
    /**
     * Number of variables disposed in this dispose call.
     */
    numDisposedVariables: number;
}
/**
 * A `Node` describes the connectivity between two layers.
 *
 * Each time a layer is connected to some new input,
 * a node is added to `layer.inboundNodes`.
 *
 * Each time the output of a layer is used by another layer,
 * a node is added to `layer.outboundNodes`.
 *
 * `nodeIndices` and `tensorIndices` are basically fine-grained coordinates
 * describing the origin of the `inputTensors`, verifying the following:
 *
 * `inputTensors[i] ==
 *    inboundLayers[i].inboundNodes[nodeIndices[i]].outputTensors[
 *      tensorIndices[i]]`
 *
 * A node from layer A to layer B is added to:
 *     A.outboundNodes
 *     B.inboundNodes
 */
export declare class Node {
    callArgs?: Kwargs;
    /**
     * The layer that takes `inputTensors` and turns them into `outputTensors`
     * (the node gets created when the `call` method of the layer is called).
     */
    outboundLayer: Layer;
    /**
     * A list of layers, the same length as `inputTensors`, the layers from where
     * `inputTensors` originate.
     */
    inboundLayers: Layer[];
    /**
     * A list of integers, the same length as `inboundLayers`. `nodeIndices[i]` is
     * the origin node of `inputTensors[i]` (necessary since each inbound layer
     * might have several nodes, e.g. if the layer is being shared with a
     * different data stream).
     */
    nodeIndices: number[];
    /**
     * A list of integers, the same length as `inboundLayers`. `tensorIndices[i]`
     * is the index of `inputTensors[i]` within the output of the inbound layer
     * (necessary since each inbound layer might have multiple tensor outputs,
     * with each one being independently manipulable).
     */
    tensorIndices: number[];
    /** List of input tensors. */
    inputTensors: SymbolicTensor[];
    /** List of output tensors. */
    outputTensors: SymbolicTensor[];
    /** List of input masks (a mask can be a tensor, or null). */
    inputMasks: Tensor[];
    /** List of output masks (a mask can be a tensor, or null). */
    outputMasks: Tensor[];
    /** List of input shape tuples. */
    inputShapes: Shape | Shape[];
    /** List of output shape tuples. */
    outputShapes: Shape | Shape[];
    readonly id: number;
    constructor(args: NodeArgs, callArgs?: Kwargs);
    getConfig(): serialization.ConfigDict;
}
/** Constructor arguments for Layer. */
export declare interface LayerArgs {
    /**
     * If defined, will be used to create an input layer to insert before this
     * layer. If both `inputShape` and `batchInputShape` are defined,
     * `batchInputShape` will be used. This argument is only applicable to input
     * layers (the first layer of a model).
     */
    inputShape?: Shape;
    /**
     * If defined, will be used to create an input layer to insert before this
     * layer. If both `inputShape` and `batchInputShape` are defined,
     * `batchInputShape` will be used. This argument is only applicable to input
     * layers (the first layer of a model).
     */
    batchInputShape?: Shape;
    /**
     * If `inputShape` is specified and `batchInputShape` is *not* specified,
     * `batchSize` is used to construct the `batchInputShape`: `[batchSize,
     * ...inputShape]`
     */
    batchSize?: number;
    /**
     * The data-type for this layer. Defaults to 'float32'.
     * This argument is only applicable to input layers (the first layer of a
     * model).
     */
    dtype?: DataType;
    /** Name for this layer. */
    name?: string;
    /**
     * Whether the weights of this layer are updatable by `fit`.
     * Defaults to true.
     */
    trainable?: boolean;
    /**
     * Initial weight values of the layer.
     */
    weights?: Tensor[];
    /** Legacy support. Do not use for new code. */
    inputDType?: DataType;
}
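/*
 * Usage sketch (illustrative): these arguments are usually passed through the
 * layer factory functions under `tf.layers`. The values below are arbitrary.
 *
 * ```js
 * // `inputShape` plus `batchSize` yields a batchInputShape of [32, 8].
 * const dense = tf.layers.dense({
 *   units: 4,
 *   inputShape: [8],
 *   batchSize: 32,
 *   dtype: 'float32',
 *   name: 'my_dense',
 *   trainable: true
 * });
 * console.log(JSON.stringify(dense.batchInputShape));  // [32, 8]
 * ```
 */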
export type CallHook = (inputs: Tensor | Tensor[], kwargs: Kwargs) => void;
/**
 * A layer is a grouping of operations and weights that can be composed to
 * create a `tf.LayersModel`.
 *
 * Layers are constructed by using the functions under the
 * [tf.layers](#Layers-Basic) namespace.
 *
 * @doc {heading: 'Layers', subheading: 'Classes', namespace: 'layers'}
 */
export declare abstract class Layer extends serialization.Serializable {
    /** Name for this layer. Must be unique within a model. */
    name: string;
    /**
     * List of InputSpec class instances.
     *
     * Each entry describes one required input:
     * - ndim
     * - dtype
     * A layer with `n` input tensors must have an `inputSpec` of length `n`.
     */
    inputSpec: InputSpec[];
    supportsMasking: boolean;
    /** Whether the layer weights will be updated during training. */
    protected trainable_: boolean;
    batchInputShape: Shape;
    dtype: DataType;
    initialWeights: Tensor[];
    inboundNodes: Node[];
    outboundNodes: Node[];
    activityRegularizer: Regularizer;
    protected _trainableWeights: LayerVariable[];
    private _nonTrainableWeights;
    private _losses;
    private _updates;
    private _built;
    private _callHook;
    private _addedWeightNames;
    readonly id: number;
    protected _stateful: boolean;
    protected _refCount: number | null;
    private fastWeightInitDuringBuild;
    constructor(args?: LayerArgs);
    /**
     * Converts a layer and its index to a unique (immutable type) name.
     * This function is used internally with `this.containerNodes`.
     * @param layer The layer.
     * @param nodeIndex The layer's position (e.g. via enumerate) in a list of
     *   nodes.
     *
     * @returns The unique name.
     */
    protected static nodeKey(layer: Layer, nodeIndex: number): string;
    /**
     * Returns this.inboundNodes at index nodeIndex.
     *
     * Porting note: This is a replacement for _get_node_attribute_at_index()
     * @param nodeIndex
     * @param attrName The name of the attribute requested for this node.
     */
    private getNodeAtIndex;
    /**
     * Retrieves the input tensor(s) of a layer at a given node.
     *
     * @param nodeIndex Integer, index of the node from which to retrieve the
     *   attribute. E.g. `nodeIndex=0` will correspond to the first time the
     *   layer was called.
     *
     * @return A tensor (or list of tensors if the layer has multiple inputs).
     */
    getInputAt(nodeIndex: number): SymbolicTensor | SymbolicTensor[];
    /**
     * Retrieves the output tensor(s) of a layer at a given node.
     *
     * @param nodeIndex Integer, index of the node from which to retrieve the
     *   attribute. E.g. `nodeIndex=0` will correspond to the first time the
     *   layer was called.
     *
     * @return A tensor (or list of tensors if the layer has multiple outputs).
     */
    getOutputAt(nodeIndex: number): SymbolicTensor | SymbolicTensor[];
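    /*
     * Usage sketch (illustrative): when a layer is shared (applied more than
     * once), each application creates a new node, and `getInputAt()` /
     * `getOutputAt()` select among them by node index.
     *
     * ```js
     * const shared = tf.layers.dense({units: 3});
     * const a = tf.input({shape: [5]});
     * const b = tf.input({shape: [5]});
     * shared.apply(a);  // Creates node 0.
     * shared.apply(b);  // Creates node 1.
     * console.log(shared.getInputAt(0).shape);   // [null, 5]
     * console.log(shared.getOutputAt(1).shape);  // [null, 3]
     * ```
     */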
    /**
     * Retrieves the input tensor(s) of a layer.
     *
     * Only applicable if the layer has exactly one inbound node,
     * i.e. if it is connected to one incoming layer.
     *
     * @return Input tensor or list of input tensors.
     *
     * @exception AttributeError if the layer is connected to more than one
     *   incoming layer.
     */
    get input(): SymbolicTensor | SymbolicTensor[];
    /**
     * Retrieves the output tensor(s) of a layer.
     *
     * Only applicable if the layer has exactly one inbound node,
     * i.e. if it is connected to one incoming layer.
     *
     * @return Output tensor or list of output tensors.
     *
     * @exception AttributeError if the layer is connected to more than one
     *   incoming layer.
     */
    get output(): SymbolicTensor | SymbolicTensor[];
    get losses(): RegularizerFn[];
    /**
     * Retrieves the Layer's current loss values.
     *
     * Used for regularizers during training.
     */
    calculateLosses(): Scalar[];
    get updates(): Tensor[];
    get built(): boolean;
    set built(built: boolean);
    get trainable(): boolean;
    set trainable(trainable: boolean);
    get trainableWeights(): LayerVariable[];
    set trainableWeights(weights: LayerVariable[]);
    get nonTrainableWeights(): LayerVariable[];
    set nonTrainableWeights(weights: LayerVariable[]);
    /**
     * The concatenation of the lists trainableWeights and nonTrainableWeights
     * (in this order).
     */
    get weights(): LayerVariable[];
    get stateful(): boolean;
    /**
     * Reset the states of the layer.
     *
     * This method of the base Layer class is essentially a no-op.
     * Subclasses that are stateful (e.g., stateful RNNs) should override this
     * method.
     */
    resetStates(): void;
    /**
     * Checks compatibility between the layer and provided inputs.
     *
     * This checks that the input tensor(s) satisfy the input assumptions of
     * the layer (if any). If not, an exception is raised.
     *
     * @param inputs Input tensor or list of input tensors.
     *
     * @exception ValueError in case of mismatch between
     *   the provided inputs and the expectations of the layer.
     */
    protected assertInputCompatibility(inputs: Tensor | Tensor[] | SymbolicTensor | SymbolicTensor[]): void;
    /**
     * This is where the layer's logic lives.
     *
     * @param inputs Input tensor, or list/tuple of input tensors.
     * @param kwargs Additional keyword arguments.
     *
     * @return A tensor or list/tuple of tensors.
     */
    call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
    protected invokeCallHook(inputs: Tensor | Tensor[], kwargs: Kwargs): void;
    /**
     * Set call hook.
     * This is currently used for testing only.
     * @param callHook
     */
    setCallHook(callHook: CallHook): void;
    /**
     * Clear call hook.
     * This is currently used for testing only.
     */
    clearCallHook(): void;
    /**
     * Builds or executes a `Layer`'s logic.
     *
     * When called with `tf.Tensor`(s), execute the `Layer`'s computation and
     * return Tensor(s). For example:
     *
     * ```js
     * const denseLayer = tf.layers.dense({
     *   units: 1,
     *   kernelInitializer: 'zeros',
     *   useBias: false
     * });
     *
     * // Invoke the layer's apply() method with a `tf.Tensor` (with concrete
     * // numeric values).
     * const input = tf.ones([2, 2]);
     * const output = denseLayer.apply(input);
     *
     * // The output's value is expected to be [[0], [0]], because the dense
     * // layer has a kernel initialized to all-zeros and does not have a bias.
     * output.print();
     * ```
     *
     * When called with `tf.SymbolicTensor`(s), this will prepare the layer for
     * future execution. This entails internal book-keeping on shapes of
     * expected Tensors, wiring layers together, and initializing weights.
     *
     * Calling `apply` with `tf.SymbolicTensor`s is typically done during the
     * building of non-`tf.Sequential` models. For example:
     *
     * ```js
     * const flattenLayer = tf.layers.flatten();
     * const denseLayer = tf.layers.dense({units: 1});
     *
     * // Use tf.input() to obtain a SymbolicTensor as input to apply().
     * const input = tf.input({shape: [2, 2]});
     * const output1 = flattenLayer.apply(input);
     *
     * // output1.shape is [null, 4]. The first dimension is the undetermined
     * // batch size. The second dimension comes from flattening the [2, 2]
     * // shape.
     * console.log(JSON.stringify(output1.shape));
     *
     * // The output SymbolicTensor of the flatten layer can be used to call
     * // the apply() of the dense layer:
     * const output2 = denseLayer.apply(output1);
     *
     * // output2.shape is [null, 1]. The first dimension is the undetermined
     * // batch size. The second dimension matches the number of units of the
     * // dense layer.
     * console.log(JSON.stringify(output2.shape));
     *
     * // The input and output can be used to construct a model that consists
     * // of the flatten and dense layers.
     * const model = tf.model({inputs: input, outputs: output2});
     * ```
     *
     * @param inputs A `tf.Tensor` or `tf.SymbolicTensor` or an Array of them.
     * @param kwargs Additional keyword arguments to be passed to `call()`.
     *
     * @return Output of the layer's `call` method.
     *
     * @exception ValueError In case the layer is missing shape information
     *   for its `build` call.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    apply(inputs: Tensor | Tensor[] | SymbolicTensor | SymbolicTensor[], kwargs?: Kwargs): Tensor | Tensor[] | SymbolicTensor | SymbolicTensor[];
    /**
     * Check compatibility between input shape and this layer's batchInputShape.
     *
     * Print warning if any incompatibility is found.
     *
     * @param inputShape Input shape to be checked.
     */
    protected warnOnIncompatibleInputShape(inputShape: Shape): void;
    /**
     * Retrieves the output shape(s) of a layer.
     *
     * Only applicable if the layer has only one inbound node, or if all inbound
     * nodes have the same output shape.
     *
     * @returns Output shape or shapes.
     * @throws AttributeError: if the layer is connected to more than one
     *   incoming node.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    get outputShape(): Shape | Shape[];
    /**
     * Counts the total number of numbers (e.g., float32, int32) in the
     * weights.
     *
     * @returns An integer count.
     * @throws RuntimeError: If the layer is not built yet (in which case its
     *   weights are not defined yet).
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    countParams(): number;
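    /*
     * Usage sketch (illustrative): a dense layer mapping 8 features to 4 units
     * has 8 * 4 kernel parameters plus 4 bias parameters.
     *
     * ```js
     * const dense = tf.layers.dense({units: 4});
     * dense.apply(tf.input({shape: [8]}));  // Builds the layer's weights.
     * console.log(dense.countParams());     // 36
     * ```
     */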
    /**
     * Creates the layer weights.
     *
     * Must be implemented on all layers that have weights.
     *
     * Called when apply() is called to construct the weights.
     *
     * @param inputShape A `Shape` or array of `Shape` (unused).
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    build(inputShape: Shape | Shape[]): void;
    /**
     * Returns the current values of the weights of the layer.
     *
     * @param trainableOnly Whether to get the values of only trainable weights.
     * @returns Weight values as an `Array` of `tf.Tensor`s.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    getWeights(trainableOnly?: boolean): Tensor[];
    /**
     * Sets the weights of the layer, from Tensors.
     *
     * @param weights A list of Tensors. The number of tensors and their shapes
     *   must match the number and shapes of the layer's weights (i.e. it should
     *   match the output of `getWeights`).
     *
     * @exception ValueError If the provided weights list does not match the
     *   layer's specifications.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    setWeights(weights: Tensor[]): void;
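    /*
     * Usage sketch (illustrative): reading weights from a built layer and
     * writing back tensors of matching shapes.
     *
     * ```js
     * const dense = tf.layers.dense({units: 2});
     * dense.apply(tf.input({shape: [3]}));  // Builds the layer's weights.
     *
     * const [kernel, bias] = dense.getWeights();
     * console.log(kernel.shape);  // [3, 2]
     * console.log(bias.shape);    // [2]
     *
     * // Replace the weights with tensors of the same shapes.
     * dense.setWeights([tf.ones([3, 2]), tf.zeros([2])]);
     * ```
     */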
    /**
     * Adds a weight variable to the layer.
     *
     * @param name Name of the new weight variable.
     * @param shape The shape of the weight.
     * @param dtype The dtype of the weight.
     * @param initializer An initializer instance.
     * @param regularizer A regularizer instance.
     * @param trainable Whether the weight should be trained via backprop or not
     *   (assuming that the layer itself is also trainable).
     * @param constraint An optional constraint instance.
     * @return The created weight variable.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    protected addWeight(name: string, shape: Shape, dtype?: DataType, initializer?: Initializer, regularizer?: Regularizer, trainable?: boolean, constraint?: Constraint, getInitializerFunc?: Function): LayerVariable;
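    /*
     * Usage sketch (illustrative): `addWeight()` is typically called from a
     * custom layer's `build()` method. The `ScaleLayer` class below is
     * hypothetical.
     *
     * ```js
     * class ScaleLayer extends tf.layers.Layer {
     *   constructor() {
     *     super({});
     *   }
     *   build(inputShape) {
     *     // One trainable weight of shape [1], initialized to 1.
     *     this.scale = this.addWeight(
     *         'scale', [1], 'float32', tf.initializers.ones());
     *   }
     *   call(inputs, kwargs) {
     *     const x = Array.isArray(inputs) ? inputs[0] : inputs;
     *     return tf.tidy(() => tf.mul(x, this.scale.read()));
     *   }
     *   computeOutputShape(inputShape) {
     *     return inputShape;
     *   }
     *   getClassName() {
     *     return 'ScaleLayer';
     *   }
     * }
     *
     * const out = new ScaleLayer().apply(tf.ones([2, 3]));
     * out.print();  // All ones, since the initial scale is 1.
     * ```
     */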
    /**
     * Set the fast-weight-initialization flag.
     *
     * In cases where the initialized weight values will be immediately
     * overwritten by loaded weight values during model loading, setting
     * the flag to `true` saves unnecessary calls to potentially expensive
     * initializers and speeds up the loading process.
     *
     * @param value Target value of the flag.
     */
    setFastWeightInitDuringBuild(value: boolean): void;
    /**
     * Add losses to the layer.
     *
     * The loss may potentially be conditional on some input tensors; for
     * instance, activity losses are conditional on the layer's inputs.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    addLoss(losses: RegularizerFn | RegularizerFn[]): void;
    /**
     * Computes the output shape of the layer.
     *
     * Assumes that the layer will be built to match the input shape provided.
     *
     * @param inputShape A shape (tuple of integers) or a list of shape tuples
     *   (one per output tensor of the layer). Shape tuples can include null for
     *   free dimensions, instead of an integer.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[];
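    /*
     * Usage sketch (illustrative): shape inference without running any data
     * through the layer. `null` marks the unknown batch dimension.
     *
     * ```js
     * const dense = tf.layers.dense({units: 10});
     * console.log(JSON.stringify(dense.computeOutputShape([null, 8])));
     * // [null, 10]
     * ```
     */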
    /**
     * Computes an output mask tensor.
     *
     * @param inputs Tensor or list of tensors.
     * @param mask Tensor or list of tensors.
     *
     * @return null or a tensor (or list of tensors, one per output tensor of the
     *   layer).
     */
    computeMask(inputs: Tensor | Tensor[], mask?: Tensor | Tensor[]): Tensor | Tensor[];
    private setMaskMetadata;
    /**
     * Internal method to create an inbound node for the layer.
     *
     * @param inputTensors List of input tensors.
     * @param outputTensors List of output tensors.
     * @param inputMasks List of input masks (a mask can be a tensor, or null).
     * @param outputMasks List of output masks (a mask can be a tensor, or null).
     * @param inputShapes List of input shape tuples.
     * @param outputShapes List of output shape tuples.
     * @param kwargs Dictionary of keyword arguments that were passed to the
     *   `call` method of the layer at the call that created the node.
     */
    private addInboundNode;
    /**
     * Returns the config of the layer.
     *
     * A layer config is a TS dictionary (serializable)
     * containing the configuration of a layer.
     * The same layer can be reinstantiated later
     * (without its trained weights) from this configuration.
     *
     * The config of a layer does not include connectivity
     * information, nor the layer class name. These are handled
     * by 'Container' (one layer of abstraction above).
     *
     * Porting Note: The TS dictionary follows TS naming standards for
     * keys, and uses tfjs-layers type-safe Enums. Serialization methods
     * should use a helper function to convert to the pythonic storage
     * standard. (see serialization_utils.convertTsToPythonic)
     *
     * @returns TS dictionary of configuration.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    getConfig(): serialization.ConfigDict;
    /**
     * Dispose the weight variables that this Layer instance holds.
     *
     * @returns {number} Number of disposed variables.
     */
    protected disposeWeights(): number;
    protected assertNotDisposed(): void;
    /**
     * Attempt to dispose layer's weights.
     *
     * This method decreases the reference count of the Layer object by 1.
     *
     * A Layer is reference-counted. Its reference count is incremented by 1
     * the first time its `apply()` method is called and when it becomes a part
     * of a new `Node` (through calling the `apply()` method on a
     * `tf.SymbolicTensor`).
     *
     * If the reference count of a Layer becomes 0, all the weights will be
     * disposed and the underlying memory (e.g., the textures allocated in WebGL)
     * will be freed.
     *
     * Note: If the reference count is greater than 0 after the decrement, the
     * weights of the Layer will *not* be disposed.
     *
     * After a Layer is disposed, it cannot be used in calls such as `apply()`,
     * `getWeights()` or `setWeights()` anymore.
     *
     * @returns A DisposeResult Object with the following fields:
     *   - refCountAfterDispose: The reference count of the Layer after this
     *     `dispose()` call.
     *   - numDisposedVariables: Number of `tf.Variable`s (i.e., weights) disposed
     *     during this `dispose()` call.
     * @throws {Error} If the layer is not built yet, or if the layer has already
     *   been disposed.
     *
     * @doc {heading: 'Models', 'subheading': 'Classes'}
     */
    dispose(): DisposeResult;
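    /*
     * Usage sketch (illustrative): disposing a layer that was applied once, so
     * its reference count is expected to drop from 1 to 0 and its weights
     * (kernel and bias) to be freed.
     *
     * ```js
     * const dense = tf.layers.dense({units: 2});
     * dense.apply(tf.input({shape: [4]}));  // Builds weights; refCount = 1.
     *
     * const result = dense.dispose();
     * console.log(result.refCountAfterDispose);   // 0
     * console.log(result.numDisposedVariables);   // 2
     * ```
     */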
}
/**
 * Returns the list of input tensors necessary to compute `tensor`.
 *
 * Output will always be a list of tensors (potentially with 1 element).
 *
 * @param tensor The tensor to start from.
 * @param layer Origin layer of the tensor.
 * @param nodeIndex Origin node index of the tensor.
 *
 * @return Array of input tensors.
 */
export declare function getSourceInputs(tensor: SymbolicTensor, layer?: Layer, nodeIndex?: number): SymbolicTensor[];
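/*
 * Usage sketch (illustrative): walking back from an output SymbolicTensor to
 * the source input(s). `getSourceInputs` is assumed to be imported from this
 * module; it is not part of the public `tf` namespace.
 *
 * ```js
 * const input = tf.input({shape: [4]});
 * const output = tf.layers.dense({units: 1}).apply(input);
 * const sources = getSourceInputs(output);
 * console.log(sources.length);     // 1
 * console.log(sources[0].shape);   // [null, 4]
 * ```
 */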