// Retrieved from unpkg.com: @tensorflow/tfjs-layers/dist/engine/topology.d.ts
/**
 * @license
 * Copyright 2018 Google LLC
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 * =============================================================================
 */
/// <amd-module name="@tensorflow/tfjs-layers/dist/engine/topology" />
import { DataType, Scalar, serialization, Tensor } from '@tensorflow/tfjs-core';
import { Constraint } from '../constraints';
import { Initializer } from '../initializers';
import { Shape } from '../keras_format/common';
import { Regularizer } from '../regularizers';
import { Kwargs, RegularizerFn } from '../types';
import { LayerVariable } from '../variables';
18export declare type Op = (x: LayerVariable) => LayerVariable;
19/**
20 * Constructor arguments for InputSpec.
21 */
22export interface InputSpecArgs {
23 /** Expected datatype of the input. */
24 dtype?: DataType;
25 /** Expected shape of the input (may include null for unchecked axes). */
26 shape?: Shape;
27 /** Expected rank of the input. */
28 ndim?: number;
29 /** Maximum rank of the input. */
30 maxNDim?: number;
31 /** Minimum rank of the input. */
32 minNDim?: number;
33 /** Dictionary mapping integer axes to a specific dimension value. */
34 axes?: {
35 [axis: number]: number;
36 };
37}
38/**
39 * Specifies the ndim, dtype and shape of every input to a layer.
40 *
41 * Every layer should expose (if appropriate) an `inputSpec` attribute:
42 * a list of instances of InputSpec (one per input tensor).
43 *
44 * A null entry in a shape is compatible with any dimension,
45 * a null shape is compatible with any shape.
46 */
47export declare class InputSpec {
48 /** Expected datatype of the input. */
49 dtype?: DataType;
50 /** Expected shape of the input (may include null for unchecked axes). */
51 shape?: Shape;
52 /** Expected rank of the input. */
53 ndim?: number;
54 /** Maximum rank of the input. */
55 maxNDim?: number;
56 /** Minimum rank of the input. */
57 minNDim?: number;
58 /** Dictionary mapping integer axes to a specific dimension value. */
59 axes?: {
60 [axis: number]: number;
61 };
62 constructor(args: InputSpecArgs);
63}
64/**
65 * `tf.SymbolicTensor` is a placeholder for a Tensor without any concrete value.
66 *
67 * They are most often encountered when building a graph of `Layer`s for a
68 * a `tf.LayersModel` and the input data's shape, but not values are known.
69 *
70 * @doc {heading: 'Models', 'subheading': 'Classes'}
71 */
72export declare class SymbolicTensor {
73 readonly dtype: DataType;
74 readonly shape: Shape;
75 sourceLayer: Layer;
76 readonly inputs: SymbolicTensor[];
77 readonly callArgs: Kwargs;
78 readonly outputTensorIndex?: number;
79 readonly id: number;
80 readonly name: string;
81 readonly originalName?: string;
82 /**
83 * Rank/dimensionality of the tensor.
84 */
85 readonly rank: number;
86 /**
87 * Replacement for _keras_history.
88 */
89 nodeIndex: number;
90 /**
91 * Replacement for _keras_history.
92 */
93 tensorIndex: number;
94 /**
95 *
96 * @param dtype
97 * @param shape
98 * @param sourceLayer The Layer that produced this symbolic tensor.
99 * @param inputs The inputs passed to sourceLayer's __call__() method.
100 * @param nodeIndex
101 * @param tensorIndex
102 * @param callArgs The keyword arguments passed to the __call__() method.
103 * @param name
104 * @param outputTensorIndex The index of this tensor in the list of outputs
105 * returned by apply().
106 */
107 constructor(dtype: DataType, shape: Shape, sourceLayer: Layer, inputs: SymbolicTensor[], callArgs: Kwargs, name?: string, outputTensorIndex?: number);
108}
109/**
110 * Constructor arguments for Node.
111 */
112export interface NodeArgs {
113 /**
114 * The layer that takes `inputTensors` and turns them into `outputTensors`.
115 * (the node gets created when the `call` method of the layer is called).
116 */
117 outboundLayer: Layer;
118 /**
119 * A list of layers, the same length as `inputTensors`, the layers from where
120 * `inputTensors` originate.
121 */
122 inboundLayers: Layer[];
123 /**
124 * A list of integers, the same length as `inboundLayers`. `nodeIndices[i]` is
125 * the origin node of `inputTensors[i]` (necessary since each inbound layer
126 * might have several nodes, e.g. if the layer is being shared with a
127 * different data stream).
128 */
129 nodeIndices: number[];
130 /**
131 * A list of integers, the same length as `inboundLayers`. `tensorIndices[i]`
132 * is the index of `inputTensors[i]` within the output of the inbound layer
133 * (necessary since each inbound layer might have multiple tensor outputs,
134 * with each one being independently manipulable).
135 */
136 tensorIndices: number[];
137 /** List of input tensors. */
138 inputTensors: SymbolicTensor[];
139 /** List of output tensors. */
140 outputTensors: SymbolicTensor[];
141 /** List of input masks (a mask can be a tensor, or null). */
142 inputMasks: Tensor[];
143 /** List of output masks (a mask can be a tensor, or null). */
144 outputMasks: Tensor[];
145 /** List of input shape tuples. */
146 inputShapes: Shape | Shape[];
147 /** List of output shape tuples. */
148 outputShapes: Shape | Shape[];
149}
150/**
151 * The type of the return value of Layer.dispose() and Container.dispose().
152 */
153export interface DisposeResult {
154 /**
155 * Reference count after the dispose call.
156 */
157 refCountAfterDispose: number;
158 /**
159 * Number of variables dispose in this dispose call.
160 */
161 numDisposedVariables: number;
162}
163/**
164 * A `Node` describes the connectivity between two layers.
165 *
166 * Each time a layer is connected to some new input,
167 * a node is added to `layer.inboundNodes`.
168 *
169 * Each time the output of a layer is used by another layer,
170 * a node is added to `layer.outboundNodes`.
171 *
172 * `nodeIndices` and `tensorIndices` are basically fine-grained coordinates
173 * describing the origin of the `inputTensors`, verifying the following:
174 *
175 * `inputTensors[i] ==
176 * inboundLayers[i].inboundNodes[nodeIndices[i]].outputTensors[
177 * tensorIndices[i]]`
178 *
179 * A node from layer A to layer B is added to:
180 * A.outboundNodes
181 * B.inboundNodes
182 */
183export declare class Node {
184 callArgs?: Kwargs;
185 /**
186 * The layer that takes `inputTensors` and turns them into `outputTensors`
187 * (the node gets created when the `call` method of the layer is called).
188 */
189 outboundLayer: Layer;
190 /**
191 * A list of layers, the same length as `inputTensors`, the layers from where
192 * `inputTensors` originate.
193 */
194 inboundLayers: Layer[];
195 /**
196 * A list of integers, the same length as `inboundLayers`. `nodeIndices[i]` is
197 * the origin node of `inputTensors[i]` (necessary since each inbound layer
198 * might have several nodes, e.g. if the layer is being shared with a
199 * different data stream).
200 */
201 nodeIndices: number[];
202 /**
203 * A list of integers, the same length as `inboundLayers`. `tensorIndices[i]`
204 * is the index of `inputTensors[i]` within the output of the inbound layer
205 * (necessary since each inbound layer might have multiple tensor outputs,
206 * with each one being independently manipulable).
207 */
208 tensorIndices: number[];
209 /** List of input tensors. */
210 inputTensors: SymbolicTensor[];
211 /** List of output tensors. */
212 outputTensors: SymbolicTensor[];
213 /** List of input masks (a mask can be a tensor, or null). */
214 inputMasks: Tensor[];
215 /** List of output masks (a mask can be a tensor, or null). */
216 outputMasks: Tensor[];
217 /** List of input shape tuples. */
218 inputShapes: Shape | Shape[];
219 /** List of output shape tuples. */
220 outputShapes: Shape | Shape[];
221 readonly id: number;
222 constructor(args: NodeArgs, callArgs?: Kwargs);
223 getConfig(): serialization.ConfigDict;
224}
225/** Constructor arguments for Layer. */
226export declare interface LayerArgs {
227 /**
228 * If defined, will be used to create an input layer to insert before this
229 * layer. If both `inputShape` and `batchInputShape` are defined,
230 * `batchInputShape` will be used. This argument is only applicable to input
231 * layers (the first layer of a model).
232 */
233 inputShape?: Shape;
234 /**
235 * If defined, will be used to create an input layer to insert before this
236 * layer. If both `inputShape` and `batchInputShape` are defined,
237 * `batchInputShape` will be used. This argument is only applicable to input
238 * layers (the first layer of a model).
239 */
240 batchInputShape?: Shape;
241 /**
242 * If `inputShape` is specified and `batchInputShape` is *not* specified,
243 * `batchSize` is used to construct the `batchInputShape`: `[batchSize,
244 * ...inputShape]`
245 */
246 batchSize?: number;
247 /**
248 * The data-type for this layer. Defaults to 'float32'.
249 * This argument is only applicable to input layers (the first layer of a
250 * model).
251 */
252 dtype?: DataType;
253 /** Name for this layer. */
254 name?: string;
255 /**
256 * Whether the weights of this layer are updatable by `fit`.
257 * Defaults to true.
258 */
259 trainable?: boolean;
260 /**
261 * Initial weight values of the layer.
262 */
263 weights?: Tensor[];
264 /** Legacy support. Do not use for new code. */
265 inputDType?: DataType;
266}
267export declare type CallHook = (inputs: Tensor | Tensor[], kwargs: Kwargs) => void;
268/**
269 * A layer is a grouping of operations and weights that can be composed to
270 * create a `tf.LayersModel`.
271 *
272 * Layers are constructed by using the functions under the
273 * [tf.layers](#Layers-Basic) namespace.
274 *
275 * @doc {heading: 'Layers', subheading: 'Classes', namespace: 'layers'}
276 */
277export declare abstract class Layer extends serialization.Serializable {
278 /** Name for this layer. Must be unique within a model. */
279 name: string;
280 /**
281 * List of InputSpec class instances.
282 *
283 * Each entry describes one required input:
284 * - ndim
285 * - dtype
286 * A layer with `n` input tensors must have an `inputSpec` of length `n`.
287 */
288 inputSpec: InputSpec[];
289 supportsMasking: boolean;
290 /** Whether the layer weights will be updated during training. */
291 protected trainable_: boolean;
292 batchInputShape: Shape;
293 dtype: DataType;
294 initialWeights: Tensor[];
295 inboundNodes: Node[];
296 outboundNodes: Node[];
297 activityRegularizer: Regularizer;
298 protected _trainableWeights: LayerVariable[];
299 private _nonTrainableWeights;
300 private _losses;
301 private _updates;
302 private _built;
303 private _callHook;
304 private _addedWeightNames;
305 readonly id: number;
306 protected _stateful: boolean;
307 protected _refCount: number | null;
308 private fastWeightInitDuringBuild;
309 constructor(args?: LayerArgs);
310 /**
311 * Converts a layer and its index to a unique (immutable type) name.
312 * This function is used internally with `this.containerNodes`.
313 * @param layer The layer.
314 * @param nodeIndex The layer's position (e.g. via enumerate) in a list of
315 * nodes.
316 *
317 * @returns The unique name.
318 */
319 protected static nodeKey(layer: Layer, nodeIndex: number): string;
320 /**
321 * Returns this.inboundNode at index nodeIndex.
322 *
323 * Porting note: This is a replacement for _get_node_attribute_at_index()
324 * @param nodeIndex
325 * @param attrName The name of the attribute related to request for this node.
326 */
327 private getNodeAtIndex;
328 /**
329 * Retrieves the input tensor(s) of a layer at a given node.
330 *
331 * @param nodeIndex Integer, index of the node from which to retrieve the
332 * attribute. E.g. `nodeIndex=0` will correspond to the first time the layer
333 * was called.
334 *
335 * @return A tensor (or list of tensors if the layer has multiple inputs).
336 */
337 getInputAt(nodeIndex: number): SymbolicTensor | SymbolicTensor[];
338 /**
339 * Retrieves the output tensor(s) of a layer at a given node.
340 *
341 * @param nodeIndex Integer, index of the node from which to retrieve the
342 * attribute. E.g. `nodeIndex=0` will correspond to the first time the layer
343 * was called.
344 *
345 * @return A tensor (or list of tensors if the layer has multiple outputs).
346 */
347 getOutputAt(nodeIndex: number): SymbolicTensor | SymbolicTensor[];
348 /**
349 * Retrieves the input tensor(s) of a layer.
350 *
351 * Only applicable if the layer has exactly one inbound node,
352 * i.e. if it is connected to one incoming layer.
353 *
354 * @return Input tensor or list of input tensors.
355 *
356 * @exception AttributeError if the layer is connected to more than one
357 * incoming layers.
358 */
359 readonly input: SymbolicTensor | SymbolicTensor[];
360 /**
361 * Retrieves the output tensor(s) of a layer.
362 *
363 * Only applicable if the layer has exactly one inbound node,
364 * i.e. if it is connected to one incoming layer.
365 *
366 * @return Output tensor or list of output tensors.
367 *
368 * @exception AttributeError if the layer is connected to more than one
369 * incoming layers.
370 */
371 readonly output: SymbolicTensor | SymbolicTensor[];
372 readonly losses: RegularizerFn[];
373 /**
374 * Retrieves the Layer's current loss values.
375 *
376 * Used for regularizers during training.
377 */
378 calculateLosses(): Scalar[];
379 readonly updates: Tensor[];
380 built: boolean;
381 trainable: boolean;
382 trainableWeights: LayerVariable[];
383 nonTrainableWeights: LayerVariable[];
384 /**
385 * The concatenation of the lists trainableWeights and nonTrainableWeights
386 * (in this order).
387 */
388 readonly weights: LayerVariable[];
389 readonly stateful: boolean;
390 /**
391 * Reset the states of the layer.
392 *
393 * This method of the base Layer class is essentially a no-op.
394 * Subclasses that are stateful (e.g., stateful RNNs) should override this
395 * method.
396 */
397 resetStates(): void;
398 /**
399 * Checks compatibility between the layer and provided inputs.
400 *
401 * This checks that the tensor(s) `input`
402 * verify the input assumptions of the layer
403 * (if any). If not, exceptions are raised.
404 *
405 * @param inputs Input tensor or list of input tensors.
406 *
407 * @exception ValueError in case of mismatch between
408 * the provided inputs and the expectations of the layer.
409 */
410 protected assertInputCompatibility(inputs: Tensor | Tensor[] | SymbolicTensor | SymbolicTensor[]): void;
411 /**
412 * This is where the layer's logic lives.
413 *
414 * @param inputs Input tensor, or list/tuple of input tensors.
415 * @param kwargs Additional keyword arguments.
416 *
417 * @return A tensor or list/tuple of tensors.
418 */
419 call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
420 protected invokeCallHook(inputs: Tensor | Tensor[], kwargs: Kwargs): void;
421 /**
422 * Set call hook.
423 * This is currently used for testing only.
424 * @param callHook
425 */
426 setCallHook(callHook: CallHook): void;
427 /**
428 * Clear call hook.
429 * This is currently used for testing only.
430 */
431 clearCallHook(): void;
432 /**
433 * Builds or executes a `Layer's logic.
434 *
435 * When called with `tf.Tensor`(s), execute the `Layer`s computation and
436 * return Tensor(s). For example:
437 *
438 * ```js
439 * const denseLayer = tf.layers.dense({
440 * units: 1,
441 * kernelInitializer: 'zeros',
442 * useBias: false
443 * });
444 *
445 * // Invoke the layer's apply() method with a `tf.Tensor` (with concrete
446 * // numeric values).
447 * const input = tf.ones([2, 2]);
448 * const output = denseLayer.apply(input);
449 *
450 * // The output's value is expected to be [[0], [0]], due to the fact that
451 * // the dense layer has a kernel initialized to all-zeros and does not have
452 * // a bias.
453 * output.print();
454 * ```
455 *
456 * When called with `tf.SymbolicTensor`(s), this will prepare the layer for
457 * future execution. This entails internal book-keeping on shapes of
458 * expected Tensors, wiring layers together, and initializing weights.
459 *
460 * Calling `apply` with `tf.SymbolicTensor`s are typically used during the
461 * building of non-`tf.Sequential` models. For example:
462 *
463 * ```js
464 * const flattenLayer = tf.layers.flatten();
465 * const denseLayer = tf.layers.dense({units: 1});
466 *
467 * // Use tf.layers.input() to obtain a SymbolicTensor as input to apply().
468 * const input = tf.input({shape: [2, 2]});
469 * const output1 = flattenLayer.apply(input);
470 *
471 * // output1.shape is [null, 4]. The first dimension is the undetermined
472 * // batch size. The second dimension comes from flattening the [2, 2]
473 * // shape.
474 * console.log(JSON.stringify(output1.shape));
475 *
476 * // The output SymbolicTensor of the flatten layer can be used to call
477 * // the apply() of the dense layer:
478 * const output2 = denseLayer.apply(output1);
479 *
480 * // output2.shape is [null, 1]. The first dimension is the undetermined
481 * // batch size. The second dimension matches the number of units of the
482 * // dense layer.
483 * console.log(JSON.stringify(output2.shape));
484 *
485 * // The input and output and be used to construct a model that consists
486 * // of the flatten and dense layers.
487 * const model = tf.model({inputs: input, outputs: output2});
488 * ```
489 *
490 * @param inputs a `tf.Tensor` or `tf.SymbolicTensor` or an Array of them.
491 * @param kwargs Additional keyword arguments to be passed to `call()`.
492 *
493 * @return Output of the layer's `call` method.
494 *
495 * @exception ValueError error in case the layer is missing shape information
496 * for its `build` call.
497 *
498 * @doc {heading: 'Models', 'subheading': 'Classes'}
499 */
500 apply(inputs: Tensor | Tensor[] | SymbolicTensor | SymbolicTensor[], kwargs?: Kwargs): Tensor | Tensor[] | SymbolicTensor | SymbolicTensor[];
501 /**
502 * Check compatibility between input shape and this layer's batchInputShape.
503 *
504 * Print warning if any incompatibility is found.
505 *
506 * @param inputShape Input shape to be checked.
507 */
508 protected warnOnIncompatibleInputShape(inputShape: Shape): void;
509 /**
510 * Retrieves the output shape(s) of a layer.
511 *
512 * Only applicable if the layer has only one inbound node, or if all inbound
513 * nodes have the same output shape.
514 *
515 * @returns Output shape or shapes.
516 * @throws AttributeError: if the layer is connected to more than one incoming
517 * nodes.
518 *
519 * @doc {heading: 'Models', 'subheading': 'Classes'}
520 */
521 readonly outputShape: Shape | Shape[];
522 /**
523 * Counts the total number of numbers (e.g., float32, int32) in the
524 * weights.
525 *
526 * @returns An integer count.
527 * @throws RuntimeError: If the layer is not built yet (in which case its
528 * weights are not defined yet.)
529 *
530 * @doc {heading: 'Models', 'subheading': 'Classes'}
531 */
532 countParams(): number;
533 /**
534 * Creates the layer weights.
535 *
536 * Must be implemented on all layers that have weights.
537 *
538 * Called when apply() is called to construct the weights.
539 *
540 * @param inputShape A `Shape` or array of `Shape` (unused).
541 *
542 * @doc {heading: 'Models', 'subheading': 'Classes'}
543 */
544 build(inputShape: Shape | Shape[]): void;
545 /**
546 * Returns the current values of the weights of the layer.
547 *
548 * @param trainableOnly Whether to get the values of only trainable weights.
549 * @returns Weight values as an `Array` of `tf.Tensor`s.
550 *
551 * @doc {heading: 'Models', 'subheading': 'Classes'}
552 */
553 getWeights(trainableOnly?: boolean): Tensor[];
554 /**
555 * Sets the weights of the layer, from Tensors.
556 *
557 * @param weights a list of Tensors. The number of arrays and their shape
558 * must match number of the dimensions of the weights of the layer (i.e.
559 * it should match the output of `getWeights`).
560 *
561 * @exception ValueError If the provided weights list does not match the
562 * layer's specifications.
563 *
564 * @doc {heading: 'Models', 'subheading': 'Classes'}
565 */
566 setWeights(weights: Tensor[]): void;
567 /**
568 * Adds a weight variable to the layer.
569 *
570 * @param name Name of the new weight variable.
571 * @param shape The shape of the weight.
572 * @param dtype The dtype of the weight.
573 * @param initializer An initializer instance.
574 * @param regularizer A regularizer instance.
575 * @param trainable Whether the weight should be trained via backprop or not
576 * (assuming that the layer itself is also trainable).
577 * @param constraint An optional trainable.
578 * @return The created weight variable.
579 *
580 * @doc {heading: 'Models', 'subheading': 'Classes'}
581 */
582 protected addWeight(name: string, shape: Shape, dtype?: DataType, initializer?: Initializer, regularizer?: Regularizer, trainable?: boolean, constraint?: Constraint, getInitializerFunc?: Function): LayerVariable;
583 /**
584 * Set the fast-weight-initialization flag.
585 *
586 * In cases where the initialized weight values will be immediately
587 * overwritten by loaded weight values during model loading, setting
588 * the flag to `true` saves unnecessary calls to potentially expensive
589 * initializers and speeds up the loading process.
590 *
591 * @param value Target value of the flag.
592 */
593 setFastWeightInitDuringBuild(value: boolean): void;
594 /**
595 * Add losses to the layer.
596 *
597 * The loss may potentionally be conditional on some inputs tensors,
598 * for instance activity losses are conditional on the layer's inputs.
599 *
600 * @doc {heading: 'Models', 'subheading': 'Classes'}
601 */
602 addLoss(losses: RegularizerFn | RegularizerFn[]): void;
603 /**
604 * Computes the output shape of the layer.
605 *
606 * Assumes that the layer will be built to match that input shape provided.
607 *
608 * @param inputShape A shape (tuple of integers) or a list of shape tuples
609 * (one per output tensor of the layer). Shape tuples can include null for
610 * free dimensions, instead of an integer.
611 *
612 * @doc {heading: 'Models', 'subheading': 'Classes'}
613 */
614 computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[];
615 /**
616 * Computes an output mask tensor.
617 *
618 * @param inputs Tensor or list of tensors.
619 * @param mask Tensor or list of tensors.
620 *
621 * @return null or a tensor (or list of tensors, one per output tensor of the
622 * layer).
623 */
624 computeMask(inputs: Tensor | Tensor[], mask?: Tensor | Tensor[]): Tensor | Tensor[];
625 /**
626 * Internal method to create an inbound node for the layer.
627 *
628 * @param inputTensors List of input tensors.
629 * @param outputTensors List of output tensors.
630 * @param inputMasks List of input masks (a mask can be a tensor, or null).
631 * @param outputMasks List of output masks (a mask can be a tensor, or null).
632 * @param inputShapes List of input shape tuples.
633 * @param outputShapes List of output shape tuples.
634 * @param kwargs Dictionary of keyword arguments that were passed to the
635 * `call` method of the layer at the call that created the node.
636 */
637 private addInboundNode;
638 /**
639 * Returns the config of the layer.
640 *
641 * A layer config is a TS dictionary (serializable)
642 * containing the configuration of a layer.
643 * The same layer can be reinstantiated later
644 * (without its trained weights) from this configuration.
645 *
646 * The config of a layer does not include connectivity
647 * information, nor the layer class name. These are handled
648 * by 'Container' (one layer of abstraction above).
649 *
650 * Porting Note: The TS dictionary follows TS naming standrds for
651 * keys, and uses tfjs-layers type-safe Enums. Serialization methods
652 * should use a helper function to convert to the pythonic storage
653 * standard. (see serialization_utils.convertTsToPythonic)
654 *
655 * @returns TS dictionary of configuration.
656 *
657 * @doc {heading: 'Models', 'subheading': 'Classes'}
658 */
659 getConfig(): serialization.ConfigDict;
660 /**
661 * Dispose the weight variables that this Layer instance holds.
662 *
663 * @returns {number} Number of disposed variables.
664 */
665 protected disposeWeights(): number;
666 protected assertNotDisposed(): void;
667 /**
668 * Attempt to dispose layer's weights.
669 *
670 * This method decrease the reference count of the Layer object by 1.
671 *
672 * A Layer is reference-counted. Its reference count is incremented by 1
673 * the first item its `apply()` method is called and when it becomes a part
674 * of a new `Node` (through calling the `apply()`) method on a
675 * `tf.SymbolicTensor`).
676 *
677 * If the reference count of a Layer becomes 0, all the weights will be
678 * disposed and the underlying memory (e.g., the textures allocated in WebGL)
679 * will be freed.
680 *
681 * Note: If the reference count is greater than 0 after the decrement, the
682 * weights of the Layer will *not* be disposed.
683 *
684 * After a Layer is disposed, it cannot be used in calls such as `apply()`,
685 * `getWeights()` or `setWeights()` anymore.
686 *
687 * @returns A DisposeResult Object with the following fields:
688 * - refCountAfterDispose: The reference count of the Container after this
689 * `dispose()` call.
690 * - numDisposedVariables: Number of `tf.Variable`s (i.e., weights) disposed
691 * during this `dispose()` call.
692 * @throws {Error} If the layer is not built yet, or if the layer has already
693 * been disposed.
694 *
695 * @doc {heading: 'Models', 'subheading': 'Classes'}
696 */
697 dispose(): DisposeResult;
698}
699/**
700 * Returns the list of input tensors necessary to compute `tensor`.
701 *
702 * Output will always be a list of tensors (potentially with 1 element).
703 *
704 * @param tensor The tensor to start from.
705 * @param layer Origin layer of the tensor.
706 * @param nodeIndex Origin node index of the tensor.
707 *
708 * @return Array of input tensors.
709 */
710export declare function getSourceInputs(tensor: SymbolicTensor, layer?: Layer, nodeIndex?: number): SymbolicTensor[];
711
\No newline at end of file