/**
 * @license
 * Copyright 2018 Google LLC
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 * =============================================================================
 */
/// <amd-module name="@tensorflow/tfjs-layers/dist/exports_layers" />
import { InputLayer, InputLayerArgs } from './engine/input_layer';
import { Layer, LayerArgs } from './engine/topology';
import { input } from './exports';
import { ELU, ELULayerArgs, LeakyReLU, LeakyReLULayerArgs, PReLU, PReLULayerArgs, ReLU, ReLULayerArgs, Softmax, SoftmaxLayerArgs, ThresholdedReLU, ThresholdedReLULayerArgs } from './layers/advanced_activations';
import { Conv1D, Conv2D, Conv2DTranspose, Conv3D, ConvLayerArgs, Cropping2D, Cropping2DLayerArgs, SeparableConv2D, SeparableConvLayerArgs, UpSampling2D, UpSampling2DLayerArgs } from './layers/convolutional';
import { DepthwiseConv2D, DepthwiseConv2DLayerArgs } from './layers/convolutional_depthwise';
import { ConvLSTM2D, ConvLSTM2DArgs, ConvLSTM2DCell, ConvLSTM2DCellArgs } from './layers/convolutional_recurrent';
import { Activation, ActivationLayerArgs, Dense, DenseLayerArgs, Dropout, DropoutLayerArgs, Flatten, FlattenLayerArgs, Masking, MaskingArgs, Permute, PermuteLayerArgs, RepeatVector, RepeatVectorLayerArgs, Reshape, ReshapeLayerArgs, SpatialDropout1D, SpatialDropout1DLayerConfig } from './layers/core';
import { Embedding, EmbeddingLayerArgs } from './layers/embeddings';
import { Add, Average, Concatenate, ConcatenateLayerArgs, Dot, DotLayerArgs, Maximum, Minimum, Multiply } from './layers/merge';
import { AlphaDropout, AlphaDropoutArgs, GaussianDropout, GaussianDropoutArgs, GaussianNoise, GaussianNoiseArgs } from './layers/noise';
import { BatchNormalization, BatchNormalizationLayerArgs, LayerNormalization, LayerNormalizationLayerArgs } from './layers/normalization';
import { ZeroPadding2D, ZeroPadding2DLayerArgs } from './layers/padding';
import { AveragePooling1D, AveragePooling2D, AveragePooling3D, GlobalAveragePooling1D, GlobalAveragePooling2D, GlobalMaxPooling1D, GlobalMaxPooling2D, GlobalPooling2DLayerArgs, MaxPooling1D, MaxPooling2D, MaxPooling3D, Pooling1DLayerArgs, Pooling2DLayerArgs, Pooling3DLayerArgs } from './layers/pooling';
import { GRU, GRUCell, GRUCellLayerArgs, GRULayerArgs, LSTM, LSTMCell, LSTMCellLayerArgs, LSTMLayerArgs, RNN, RNNCell, RNNLayerArgs, SimpleRNN, SimpleRNNCell, SimpleRNNCellLayerArgs, SimpleRNNLayerArgs, StackedRNNCells, StackedRNNCellsArgs } from './layers/recurrent';
import { Bidirectional, BidirectionalLayerArgs, TimeDistributed, WrapperLayerArgs } from './layers/wrappers';
/**
 * An input layer is an entry point into a `tf.LayersModel`.
 *
 * `InputLayer` is generated automatically for `tf.Sequential` models by
 * specifying the `inputShape` or `batchInputShape` for the first layer. It
 * should not be specified explicitly. However, it can be useful sometimes,
 * e.g., when constructing a sequential model from a subset of another
 * sequential model's layers, as the code snippet below shows.
 *
 * ```js
 * // Define a model which simply adds two inputs.
 * const model1 = tf.sequential();
 * model1.add(tf.layers.dense({inputShape: [4], units: 3, activation: 'relu'}));
 * model1.add(tf.layers.dense({units: 1, activation: 'sigmoid'}));
 * model1.summary();
 * model1.predict(tf.zeros([1, 4])).print();
 *
 * // Construct another model, reusing the second layer of `model1` while
 * // not using the first layer of `model1`. Note that you cannot add the second
 * // layer of `model1` directly as the first layer of the new sequential model,
 * // because doing so will lead to an error related to the fact that the layer
 * // is not an input layer. Instead, you need to create an `inputLayer` and add
 * // it to the new sequential model before adding the reused layer.
 * const model2 = tf.sequential();
 * // Use an inputShape that matches the input shape of `model1`'s second
 * // layer.
 * model2.add(tf.layers.inputLayer({inputShape: [3]}));
 * model2.add(model1.layers[1]);
 * model2.summary();
 * model2.predict(tf.zeros([1, 3])).print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Inputs', namespace: 'layers'}
 */
export declare function inputLayer(args: InputLayerArgs): InputLayer;
/**
 * Exponential Linear Unit (ELU).
 *
 * It follows:
 * `f(x) = alpha * (exp(x) - 1.) for x < 0`,
 * `f(x) = x for x >= 0`.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * References:
 * - [Fast and Accurate Deep Network Learning by Exponential Linear Units
 * (ELUs)](https://arxiv.org/abs/1511.07289v1)
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function elu(args?: ELULayerArgs): ELU;
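/*
 * Example (an added sketch, not part of the upstream typings; it assumes the
 * usual `tf` namespace from @tensorflow/tfjs and eager application of a layer
 * to a concrete tensor):
 *
 * ```js
 * const eluLayer = tf.layers.elu({alpha: 1.0});
 * // Negative inputs map to alpha * (exp(x) - 1); positive inputs pass through.
 * eluLayer.apply(tf.tensor1d([-2, -1, 0, 1, 2])).print();
 * ```
 */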
/**
 * Rectified Linear Unit activation function.
 *
 * Input shape:
 * Arbitrary. Use the config field `inputShape` (Array of integers, does
 * not include the sample axis) when using this layer as the first layer
 * in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function reLU(args?: ReLULayerArgs): ReLU;
/**
 * Leaky version of a rectified linear unit.
 *
 * It allows a small gradient when the unit is not active:
 * `f(x) = alpha * x for x < 0.`
 * `f(x) = x for x >= 0.`
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function leakyReLU(args?: LeakyReLULayerArgs): LeakyReLU;
/**
 * Parameterized version of a leaky rectified linear unit.
 *
 * It follows
 * `f(x) = alpha * x for x < 0.`
 * `f(x) = x for x >= 0.`
 * wherein `alpha` is a trainable weight.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function prelu(args?: PReLULayerArgs): PReLU;
/**
 * Softmax activation layer.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function softmax(args?: SoftmaxLayerArgs): Softmax;
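/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const softmaxLayer = tf.layers.softmax({axis: -1});
 * // Each row of the output sums to 1.
 * softmaxLayer.apply(tf.tensor2d([[1, 2, 3], [1, 1, 1]])).print();
 * ```
 */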
/**
 * Thresholded Rectified Linear Unit.
 *
 * It follows:
 * `f(x) = x for x > theta`,
 * `f(x) = 0 otherwise`.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * References:
 * - [Zero-Bias Autoencoders and the Benefits of Co-Adapting
 * Features](http://arxiv.org/abs/1402.3337)
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function thresholdedReLU(args?: ThresholdedReLULayerArgs): ThresholdedReLU;
/**
 * 1D convolution layer (e.g., temporal convolution).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input over a single spatial (or temporal) dimension
 * to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model, provide an
 * `inputShape` argument (an `Array` of integers or `null` entries).
 *
 * For example, `inputShape` would be:
 * - `[10, 128]` for sequences of 10 vectors, each 128-dimensional
 * - `[null, 128]` for variable-length sequences of 128-dimensional vectors.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv1d(args: ConvLayerArgs): Conv1D;
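/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // 10 time steps of 128-dimensional vectors; 8 filters of width 3.
 * model.add(tf.layers.conv1d(
 *     {filters: 8, kernelSize: 3, inputShape: [10, 128]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 8, 8]: 10 - 3 + 1 = 8 output steps with the default 'valid'
 * // padding.
 * ```
 */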
/**
 * 2D convolution layer (e.g. spatial convolution over images).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model,
 * provide the keyword argument `inputShape`
 * (Array of integers, does not include the sample axis),
 * e.g. `inputShape: [128, 128, 3]` for 128x128 RGB pictures
 * in `dataFormat: 'channelsLast'`.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv2d(args: ConvLayerArgs): Conv2D;
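/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.conv2d({
 *   filters: 16,
 *   kernelSize: 3,
 *   activation: 'relu',
 *   inputShape: [28, 28, 1]  // E.g., single-channel 28x28 images.
 * }));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 26, 26, 16] with the default 'valid' padding.
 * ```
 */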
/**
 * Transposed convolutional layer (sometimes called Deconvolution).
 *
 * The need for transposed convolutions generally arises
 * from the desire to use a transformation going in the opposite direction of
 * a normal convolution, i.e., from something that has the shape of the output
 * of some convolution to something that has the shape of its input while
 * maintaining a connectivity pattern that is compatible with said
 * convolution.
 *
 * When using this layer as the first layer in a model, provide the
 * configuration `inputShape` (`Array` of integers, does not include the
 * sample axis), e.g., `inputShape: [128, 128, 3]` for 128x128 RGB pictures in
 * `dataFormat: 'channelsLast'`.
 *
 * Input shape:
 * 4D tensor with shape:
 * `[batch, channels, rows, cols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape
 * `[batch, rows, cols, channels]` if `dataFormat` is `'channelsLast'`.
 *
 * Output shape:
 * 4D tensor with shape:
 * `[batch, filters, newRows, newCols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape:
 * `[batch, newRows, newCols, filters]` if `dataFormat` is `'channelsLast'`.
 *
 * References:
 * - [A guide to convolution arithmetic for deep
 * learning](https://arxiv.org/abs/1603.07285v1)
 * - [Deconvolutional
 * Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv2dTranspose(args: ConvLayerArgs): Conv2DTranspose;
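/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // Upsamples 4x4 feature maps; with 'valid' padding the output length is
 * // (4 - 1) * strides + kernelSize = 9.
 * model.add(tf.layers.conv2dTranspose(
 *     {filters: 8, kernelSize: 3, strides: 2, inputShape: [4, 4, 3]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 9, 9, 8]
 * ```
 */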
/**
 * 3D convolution layer (e.g. spatial convolution over volumes).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model,
 * provide the keyword argument `inputShape`
 * (Array of integers, does not include the sample axis),
 * e.g. `inputShape: [128, 128, 128, 1]` for 128x128x128 grayscale volumes
 * in `dataFormat: 'channelsLast'`.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv3d(args: ConvLayerArgs): Conv3D;
export declare function conv3dTranspose(args: ConvLayerArgs): Layer;
/**
 * Depthwise separable 2D convolution.
 *
 * Separable convolution consists of first performing
 * a depthwise spatial convolution
 * (which acts on each input channel separately)
 * followed by a pointwise convolution which mixes together the resulting
 * output channels. The `depthMultiplier` argument controls how many
 * output channels are generated per input channel in the depthwise step.
 *
 * Intuitively, separable convolutions can be understood as
 * a way to factorize a convolution kernel into two smaller kernels,
 * or as an extreme version of an Inception block.
 *
 * Input shape:
 * 4D tensor with shape:
 * `[batch, channels, rows, cols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape:
 * `[batch, rows, cols, channels]` if `dataFormat` is `'channelsLast'`.
 *
 * Output shape:
 * 4D tensor with shape:
 * `[batch, filters, newRows, newCols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape:
 * `[batch, newRows, newCols, filters]` if `dataFormat` is `'channelsLast'`.
 * `rows` and `cols` values might have changed due to padding.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function separableConv2d(args: SeparableConvLayerArgs): SeparableConv2D;
/**
 * Cropping layer for 2D input (e.g., image).
 *
 * This layer can crop an input
 * at the top, bottom, left and right side of an image tensor.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`.
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, croppedRows, croppedCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, croppedRows, croppedCols]`.
 *
 * Example:
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.cropping2D({cropping: [[2, 2], [2, 2]],
 * inputShape: [128, 128, 3]}));
 * // Now the output shape is [batch, 124, 124, 3].
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function cropping2D(args: Cropping2DLayerArgs): Cropping2D;
/**
 * Upsampling layer for 2D inputs.
 *
 * Repeats the rows and columns of the data
 * by `size[0]` and `size[1]` respectively.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, upsampledRows, upsampledCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, upsampledRows, upsampledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function upSampling2d(args: UpSampling2DLayerArgs): UpSampling2D;
/**
 * Depthwise separable 2D convolution.
 *
 * A depthwise separable convolution consists of performing just the first
 * step of a depthwise spatial convolution (which acts on each input channel
 * separately). The `depthMultiplier` argument controls how many output
 * channels are generated per input channel in the depthwise step.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function depthwiseConv2d(args: DepthwiseConv2DLayerArgs): DepthwiseConv2D;
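/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // 4 input channels with depthMultiplier 2 yield 8 output channels.
 * model.add(tf.layers.depthwiseConv2d(
 *     {kernelSize: 3, depthMultiplier: 2, inputShape: [8, 8, 4]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 6, 6, 8]
 * ```
 */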
/**
 * Applies an activation function to an output.
 *
 * This layer applies an element-wise activation function. Other layers,
 * notably `dense`, can also apply activation functions. Use this isolated
 * activation layer to extract the values before and after the
 * activation. For instance:
 *
 * ```js
 * const input = tf.input({shape: [5]});
 * const denseLayer = tf.layers.dense({units: 1});
 * const activationLayer = tf.layers.activation({activation: 'relu6'});
 *
 * // Obtain the output symbolic tensors by applying the layers in order.
 * const denseOutput = denseLayer.apply(input);
 * const activationOutput = activationLayer.apply(denseOutput);
 *
 * // Create the model based on the inputs.
 * const model = tf.model({
 * inputs: input,
 * outputs: [denseOutput, activationOutput]
 * });
 *
 * // Collect both outputs and print separately.
 * const [denseOut, activationOut] = model.predict(tf.randomNormal([6, 5]));
 * denseOut.print();
 * activationOut.print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function activation(args: ActivationLayerArgs): Activation;
/**
 * Creates a dense (fully connected) layer.
 *
 * This layer implements the operation:
 * `output = activation(dot(input, kernel) + bias)`
 *
 * `activation` is the element-wise activation function
 * passed as the `activation` argument.
 *
 * `kernel` is a weights matrix created by the layer.
 *
 * `bias` is a bias vector created by the layer (only applicable if `useBias`
 * is `true`).
 *
 * **Input shape:**
 *
 * nD `tf.Tensor` with shape: `(batchSize, ..., inputDim)`.
 *
 * The most common situation would be
 * a 2D input with shape `(batchSize, inputDim)`.
 *
 * **Output shape:**
 *
 * nD tensor with shape: `(batchSize, ..., units)`.
 *
 * For instance, for a 2D input with shape `(batchSize, inputDim)`,
 * the output would have shape `(batchSize, units)`.
 *
 * Note: if the input to the layer has a rank greater than 2, then it is
 * flattened prior to the initial dot product with the kernel.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function dense(args: DenseLayerArgs): Dense;
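/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // output = sigmoid(dot(input, kernel) + bias), with a [4, 2] kernel.
 * model.add(tf.layers.dense(
 *     {units: 2, activation: 'sigmoid', inputShape: [4]}));
 * model.predict(tf.randomNormal([3, 4])).print();  // Shape [3, 2].
 * ```
 */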
/**
 * Applies
 * [dropout](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) to
 * the input.
 *
 * Dropout consists of randomly setting a fraction `rate` of input units to 0
 * at each update during training time, which helps prevent overfitting.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function dropout(args: DropoutLayerArgs): Dropout;
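/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs and that a `training` flag can be
 * passed through `Layer.apply`'s kwargs):
 *
 * ```js
 * const dropoutLayer = tf.layers.dropout({rate: 0.5});
 * const x = tf.ones([2, 4]);
 * // At inference time dropout is the identity.
 * dropoutLayer.apply(x).print();
 * // During training, units are dropped and survivors scaled by 1 / (1 - rate).
 * dropoutLayer.apply(x, {training: true}).print();
 * ```
 */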
/**
 * Spatial 1D version of Dropout.
 *
 * This layer type performs the same function as the Dropout layer, but it
 * drops entire 1D feature maps instead of individual elements. For example,
 * if an input example consists of 3 timesteps and the feature map for each
 * timestep has a size of 4, a `spatialDropout1d` layer may zero out the
 * feature maps of the 1st and 2nd timesteps completely while sparing all
 * feature elements of the 3rd timestep.
 *
 * If adjacent frames (timesteps) are strongly correlated (as is normally the
 * case in early convolution layers), regular dropout will not regularize the
 * activations and will merely result in an effective learning rate decrease.
 * In this case, `spatialDropout1d` will help promote independence among
 * feature maps and should be used instead.
 *
 * **Arguments:**
 * rate: A floating-point number >= 0 and <= 1. Fraction of the input elements
 * to drop.
 *
 * **Input shape:**
 * 3D tensor with shape `(samples, timesteps, channels)`.
 *
 * **Output shape:**
 * Same as the input shape.
 *
 * References:
 * - [Efficient Object Localization Using Convolutional
 * Networks](https://arxiv.org/abs/1411.4280)
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function spatialDropout1d(args: SpatialDropout1DLayerConfig): SpatialDropout1D;
/**
 * Flattens the input. Does not affect the batch size.
 *
 * A `Flatten` layer flattens each batch in its inputs to 1D (making the output
 * 2D).
 *
 * For example:
 *
 * ```js
 * const input = tf.input({shape: [4, 3]});
 * const flattenLayer = tf.layers.flatten();
 * // Inspect the inferred output shape of the flatten layer, which
 * // equals `[null, 12]`. The 2nd dimension is 4 * 3, i.e., the result of the
 * // flattening. (The 1st dimension is the undetermined batch size.)
 * console.log(JSON.stringify(flattenLayer.apply(input).shape));
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function flatten(args?: FlattenLayerArgs): Flatten;
/**
 * Repeats the input n times in a new dimension.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.repeatVector({n: 4, inputShape: [2]}));
 * const x = tf.tensor2d([[10, 20]]);
 * // Use the model to do inference on a data point the model hasn't seen.
 * model.predict(x).print();
 * // The output shape is now [batch, 4, 2].
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function repeatVector(args: RepeatVectorLayerArgs): RepeatVector;
/**
 * Reshapes an input to a certain shape.
 *
 * ```js
 * const input = tf.input({shape: [4, 3]});
 * const reshapeLayer = tf.layers.reshape({targetShape: [2, 6]});
 * // Inspect the inferred output shape of the Reshape layer, which
 * // equals `[null, 2, 6]`. (The 1st dimension is the undetermined batch size.)
 * console.log(JSON.stringify(reshapeLayer.apply(input).shape));
 * ```
 *
 * Input shape:
 * Arbitrary, although all dimensions in the input shape must be fixed.
 * Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * [batchSize, targetShape[0], targetShape[1], ...,
 * targetShape[targetShape.length - 1]].
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function reshape(args: ReshapeLayerArgs): Reshape;
/**
 * Permutes the dimensions of the input according to a given pattern.
 *
 * Useful for, e.g., connecting RNNs and convnets together.
 *
 * Example:
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.permute({
 * dims: [2, 1],
 * inputShape: [10, 64]
 * }));
 * console.log(model.outputShape);
 * // Now model's output shape is [null, 64, 10], where null is the
 * // unpermuted sample (batch) dimension.
 * ```
 *
 * Input shape:
 * Arbitrary. Use the configuration field `inputShape` when using this
 * layer as the first layer in a model.
 *
 * Output shape:
 * Same rank as the input shape, but with the dimensions re-ordered (i.e.,
 * permuted) according to the `dims` configuration of this layer.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function permute(args: PermuteLayerArgs): Permute;
/**
 * Maps positive integers (indices) into dense vectors of fixed size.
 * E.g., [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
 *
 * **Input shape:** 2D tensor with shape: `[batchSize, sequenceLength]`.
 *
 * **Output shape:** 3D tensor with shape: `[batchSize, sequenceLength,
 * outputDim]`.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function embedding(args: EmbeddingLayerArgs): Embedding;
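/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // A vocabulary of 100 indices, each mapped to an 8-dimensional vector.
 * model.add(tf.layers.embedding(
 *     {inputDim: 100, outputDim: 8, inputShape: [10]}));
 * const indices =
 *     tf.tensor2d([[4, 20, 7, 3, 0, 0, 1, 2, 5, 9]], [1, 10], 'int32');
 * console.log(JSON.stringify(model.predict(indices).shape));
 * // [1, 10, 8]
 * ```
 */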
/**
 * Layer that performs element-wise addition on an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). The inputs are specified as an
 * `Array` when the `apply` method of the `Add` layer instance is called. For
 * example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const addLayer = tf.layers.add();
 * const sum = addLayer.apply([input1, input2]);
 * console.log(JSON.stringify(sum.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function add(args?: LayerArgs): Add;
/**
 * Layer that performs element-wise averaging on an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const averageLayer = tf.layers.average();
 * const average = averageLayer.apply([input1, input2]);
 * console.log(JSON.stringify(average.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function average(args?: LayerArgs): Average;
/**
 * Layer that concatenates an `Array` of inputs.
 *
 * It takes a list of tensors, all of the same shape except for the
 * concatenation axis, and returns a single tensor, the concatenation
 * of all inputs. For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 3]});
 * const concatLayer = tf.layers.concatenate();
 * const output = concatLayer.apply([input1, input2]);
 * console.log(JSON.stringify(output.shape));
 * // You get [null, 2, 5], with the first dimension as the undetermined batch
 * // dimension. The last dimension (5) is the result of concatenating the
 * // last dimensions of the inputs (2 and 3).
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function concatenate(args?: ConcatenateLayerArgs): Concatenate;
/**
 * Layer that computes the element-wise maximum of an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const maxLayer = tf.layers.maximum();
 * const max = maxLayer.apply([input1, input2]);
 * console.log(JSON.stringify(max.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function maximum(args?: LayerArgs): Maximum;
/**
 * Layer that computes the element-wise minimum of an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const minLayer = tf.layers.minimum();
 * const min = minLayer.apply([input1, input2]);
 * console.log(JSON.stringify(min.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function minimum(args?: LayerArgs): Minimum;
/**
 * Layer that multiplies (element-wise) an `Array` of inputs.
 *
 * It takes as input an Array of tensors, all of the same
 * shape, and returns a single tensor (also of the same shape).
 * For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const input3 = tf.input({shape: [2, 2]});
 * const multiplyLayer = tf.layers.multiply();
 * const product = multiplyLayer.apply([input1, input2, input3]);
 * console.log(JSON.stringify(product.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function multiply(args?: LayerArgs): Multiply;
/**
 * Layer that computes a dot product between samples in two tensors.
 *
 * E.g., if applied to a list of two tensors `a` and `b` both of shape
 * `[batchSize, n]`, the output will be a tensor of shape `[batchSize, 1]`,
 * where each entry at index `[i, 0]` will be the dot product between
 * `a[i, :]` and `b[i, :]`.
 *
 * Example:
 *
 * ```js
 * const dotLayer = tf.layers.dot({axes: -1});
 * const x1 = tf.tensor2d([[10, 20], [30, 40]]);
 * const x2 = tf.tensor2d([[-1, -2], [-3, -4]]);
 *
 * // Invoke the layer's apply() method in eager (imperative) mode.
 * const y = dotLayer.apply([x1, x2]);
 * y.print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function dot(args: DotLayerArgs): Dot;
/**
 * Batch normalization layer (Ioffe and Szegedy, 2014).
 *
 * Normalizes the activations of the previous layer at each batch,
 * i.e. applies a transformation that maintains the mean activation
 * close to 0 and the activation standard deviation close to 1.
 *
 * Input shape:
 * Arbitrary. Use the keyword argument `inputShape` (Array of integers, does
 * not include the sample axis) when calling the constructor of this class,
 * if this layer is used as a first layer in a model.
 *
 * Output shape:
 * Same shape as input.
 *
 * References:
 * - [Batch Normalization: Accelerating Deep Network Training by Reducing
 * Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
 *
 * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'}
 */
export declare function batchNormalization(args?: BatchNormalizationLayerArgs): BatchNormalization;
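/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.dense({units: 16, inputShape: [8]}));
 * // Normalizes the dense activations; the shape is unchanged.
 * model.add(tf.layers.batchNormalization());
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 16]
 * ```
 */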
/**
 * Layer-normalization layer (Ba et al., 2016).
 *
 * Normalizes the activations of the previous layer for each given example in a
 * batch independently, instead of across a batch like in `batchNormalization`.
 * In other words, this layer applies a transformation that maintains the mean
 * activation within each example close to 0 and the activation variance close
 * to 1.
 *
 * Input shape:
 * Arbitrary. Use the argument `inputShape` when using this layer as the first
 * layer in a model.
 *
 * Output shape:
 * Same as input.
 *
 * References:
 * - [Layer Normalization](https://arxiv.org/abs/1607.06450)
 *
 * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'}
 */
export declare function layerNormalization(args?: LayerNormalizationLayerArgs): LayerNormalization;
/**
 * Zero-padding layer for 2D input (e.g., image).
 *
 * This layer can add rows and columns of zeros
 * at the top, bottom, left and right side of an image tensor.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`.
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, paddedRows, paddedCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, paddedRows, paddedCols]`.
 *
 * @doc {heading: 'Layers', subheading: 'Padding', namespace: 'layers'}
 */
export declare function zeroPadding2d(args?: ZeroPadding2DLayerArgs): ZeroPadding2D;
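/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // Pad 1 row on top/bottom and 2 columns on left/right.
 * model.add(tf.layers.zeroPadding2d(
 *     {padding: [[1, 1], [2, 2]], inputShape: [4, 4, 3]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 6, 8, 3]
 * ```
 */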
/**
 * Average pooling operation for temporal data.
 *
 * Input shape: `[batchSize, inLength, channels]`
 *
 * Output shape: `[batchSize, pooledLength, channels]`
 *
 * `tf.avgPool1d` is an alias.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling1d(args: Pooling1DLayerArgs): AveragePooling1D;
export declare function avgPool1d(args: Pooling1DLayerArgs): AveragePooling1D;
export declare function avgPooling1d(args: Pooling1DLayerArgs): AveragePooling1D;
/**
 * Average pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, rows, cols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, pooledRows, pooledCols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, pooledRows, pooledCols]`
 *
 * `tf.avgPool2d` is an alias.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling2d(args: Pooling2DLayerArgs): AveragePooling2D;
export declare function avgPool2d(args: Pooling2DLayerArgs): AveragePooling2D;
export declare function avgPooling2d(args: Pooling2DLayerArgs): AveragePooling2D;
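/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // Average over non-overlapping 2x2 windows.
 * model.add(tf.layers.averagePooling2d(
 *     {poolSize: [2, 2], strides: [2, 2], inputShape: [8, 8, 3]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 4, 4, 3]
 * ```
 */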
/**
 * Average pooling operation for 3D data.
 *
 * Input shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, depths, rows, cols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, depths, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling3d(args: Pooling3DLayerArgs): AveragePooling3D;
export declare function avgPool3d(args: Pooling3DLayerArgs): AveragePooling3D;
export declare function avgPooling3d(args: Pooling3DLayerArgs): AveragePooling3D;
/**
 * Global average pooling operation for temporal data.
 *
 * Input shape: 3D tensor with shape: `[batchSize, steps, features]`.
 *
 * Output shape: 2D tensor with shape: `[batchSize, features]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalAveragePooling1d(args?: LayerArgs): GlobalAveragePooling1D;
/**
 * Global average pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat` is `CHANNEL_LAST`:
 * 4D tensor with shape: `[batchSize, rows, cols, channels]`.
 * - If `dataFormat` is `CHANNEL_FIRST`:
 * 4D tensor with shape: `[batchSize, channels, rows, cols]`.
 *
 * Output shape:
 * 2D tensor with shape: `[batchSize, channels]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalAveragePooling2d(args: GlobalPooling2DLayerArgs): GlobalAveragePooling2D;
/**
 * Global max pooling operation for temporal data.
 *
 * Input shape: 3D tensor with shape: `[batchSize, steps, features]`.
 *
 * Output shape: 2D tensor with shape: `[batchSize, features]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalMaxPooling1d(args?: LayerArgs): GlobalMaxPooling1D;
/**
 * Global max pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat` is `CHANNEL_LAST`:
 * 4D tensor with shape: `[batchSize, rows, cols, channels]`.
 * - If `dataFormat` is `CHANNEL_FIRST`:
 * 4D tensor with shape: `[batchSize, channels, rows, cols]`.
 *
 * Output shape:
 * 2D tensor with shape: `[batchSize, channels]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalMaxPooling2d(args: GlobalPooling2DLayerArgs): GlobalMaxPooling2D;
/**
 * Max pooling operation for temporal data.
 *
 * Input shape: `[batchSize, inLength, channels]`
 *
 * Output shape: `[batchSize, pooledLength, channels]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling1d(args: Pooling1DLayerArgs): MaxPooling1D;
/**
 * Max pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, rows, cols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, pooledRows, pooledCols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, pooledRows, pooledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling2d(args: Pooling2DLayerArgs): MaxPooling2D;
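/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // With 'same' padding the output length is ceil(7 / 2) = 4.
 * model.add(tf.layers.maxPooling2d(
 *     {poolSize: 2, strides: 2, padding: 'same', inputShape: [7, 7, 3]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 4, 4, 3]
 * ```
 */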
/**
 * Max pooling operation for 3D data.
 *
 * Input shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, depths, rows, cols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, depths, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling3d(args: Pooling3DLayerArgs): MaxPooling3D;
/**
 * Gated Recurrent Unit - Cho et al. 2014.
 *
 * This is an `RNN` layer consisting of one `GRUCell`. However, unlike
 * the underlying `GRUCell`, the `apply` method of `GRU` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const rnn = tf.layers.gru({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `GRUCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function gru(args: GRULayerArgs): GRU;
/**
 * Cell class for `GRU`.
 *
 * `GRUCell` is distinct from the `RNN` subclass `GRU` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `GRU` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.gruCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `GRUCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCells` internally) and use it to create
 * an RNN. For example:
 *
 * ```js
 * const cells = [
 * tf.layers.gruCell({units: 4}),
 * tf.layers.gruCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `gruCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `GRUCell`, use
 * `tf.layers.gru`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function gruCell(args: GRUCellLayerArgs): GRUCell;
/**
 * Long Short-Term Memory layer - Hochreiter 1997.
 *
 * This is an `RNN` layer consisting of one `LSTMCell`. However, unlike
 * the underlying `LSTMCell`, the `apply` method of `LSTM` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const lstm = tf.layers.lstm({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = lstm.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `LSTMCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function lstm(args: LSTMLayerArgs): LSTM;
/**
 * Cell class for `LSTM`.
 *
 * `LSTMCell` is distinct from the `RNN` subclass `LSTM` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `LSTM` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.lstmCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `LSTMCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCells` internally) and use it to create
 * an RNN. For example:
 *
 * ```js
 * const cells = [
 * tf.layers.lstmCell({units: 4}),
 * tf.layers.lstmCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `lstmCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `LSTMCell`, use
 * `tf.layers.lstm`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function lstmCell(args: LSTMCellLayerArgs): LSTMCell;
/**
 * Fully-connected RNN where the output is to be fed back to input.
 *
 * This is an `RNN` layer consisting of one `SimpleRNNCell`. However, unlike
 * the underlying `SimpleRNNCell`, the `apply` method of `SimpleRNN` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const rnn = tf.layers.simpleRNN({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `SimpleRNNCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function simpleRNN(args: SimpleRNNLayerArgs): SimpleRNN;
/**
 * Cell class for `SimpleRNN`.
 *
 * `SimpleRNNCell` is distinct from the `RNN` subclass `SimpleRNN` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `SimpleRNN` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.simpleRNNCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `SimpleRNNCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCells` internally) and use it to create
 * an RNN. For example:
 *
 * ```js
 * const cells = [
 * tf.layers.simpleRNNCell({units: 4}),
 * tf.layers.simpleRNNCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `SimpleRNNCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `SimpleRNNCell`, use
 * `tf.layers.simpleRNN`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function simpleRNNCell(args: SimpleRNNCellLayerArgs): SimpleRNNCell;
/**
 * Convolutional LSTM layer - Xingjian Shi 2015.
 *
 * This is a `ConvRNN2D` layer consisting of one `ConvLSTM2DCell`. However,
 * unlike the underlying `ConvLSTM2DCell`, the `apply` method of `ConvLSTM2D`
 * operates on a sequence of inputs. The shape of the input (not including the
 * first, batch dimension) needs to be 4-D, with the first dimension being time
 * steps. For example:
 *
 * ```js
 * const filters = 3;
 * const kernelSize = 3;
 *
 * const batchSize = 4;
 * const sequenceLength = 2;
 * const size = 5;
 * const channels = 3;
 *
 * const inputShape = [batchSize, sequenceLength, size, size, channels];
 * const input = tf.ones(inputShape);
 *
 * const layer = tf.layers.convLstm2d({filters, kernelSize});
 *
 * const output = layer.apply(input);
 * ```
 */
/** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */
export declare function convLstm2d(args: ConvLSTM2DArgs): ConvLSTM2D;
/**
 * Cell class for `ConvLSTM2D`.
 *
 * `ConvLSTM2DCell` is distinct from the `ConvRNN2D` subclass `ConvLSTM2D` in
 * that its `call` method takes the input data of only a single time step and
 * returns the cell's output at the time step, while `ConvLSTM2D` takes the
 * input data over a number of time steps. For example:
 *
 * ```js
 * const filters = 3;
 * const kernelSize = 3;
 *
 * const sequenceLength = 1;
 * const size = 5;
 * const channels = 3;
 *
 * const inputShape = [sequenceLength, size, size, channels];
 * const input = tf.ones(inputShape);
 *
 * const cell = tf.layers.convLstm2dCell({filters, kernelSize});
 *
 * cell.build(input.shape);
 *
 * const outputSize = size - kernelSize + 1;
 * const outShape = [sequenceLength, outputSize, outputSize, filters];
 *
 * const initialH = tf.zeros(outShape);
 * const initialC = tf.zeros(outShape);
 *
 * const [o, h, c] = cell.call([input, initialH, initialC], {});
 * ```
 */
/** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */
export declare function convLstm2dCell(args: ConvLSTM2DCellArgs): ConvLSTM2DCell;
/**
 * Base class for recurrent layers.
 *
 * Input shape:
 * 3D tensor with shape `[batchSize, timeSteps, inputDim]`.
 *
 * Output shape:
 * - if `returnState`, an Array of tensors (i.e., `tf.Tensor`s). The first
 * tensor is the output. The remaining tensors are the states at the
 * last time step, each with shape `[batchSize, units]`.
 * - if `returnSequences`, the output will have shape
 * `[batchSize, timeSteps, units]`.
 * - else, the output will have shape `[batchSize, units]`.
 *
 * Masking:
 * This layer supports masking for input data with a variable number
 * of timesteps. To introduce masks to your data,
 * use an embedding layer with the `maskZero` parameter
 * set to `true`.
 *
 * Notes on using statefulness in RNNs:
 * You can set RNN layers to be 'stateful', which means that the states
 * computed for the samples in one batch will be reused as initial states
 * for the samples in the next batch. This assumes a one-to-one mapping
 * between samples in different successive batches.
 *
 * To enable statefulness:
 * - specify `stateful: true` in the layer constructor.
 * - specify a fixed batch size for your model:
 * for a sequential model, pass `batchInputShape: [...]` to the first
 * layer in your model;
 * for a functional model with one or more Input layers, pass
 * `batchShape: [...]` to all the first layers in your model.
 * This is the expected shape of your inputs *including the batch size*.
 * It should be an Array of integers, e.g., `[32, 10, 100]`.
 * - specify `shuffle: false` when calling `fit()`.
 *
 * To reset the states of your model, call `resetStates()` on either
 * a specific layer, or on your entire model.
 *
 * Note on specifying the initial state of RNNs:
 * You can specify the initial state of RNN layers symbolically by
 * calling them with the option `initialState`. The value of
 * `initialState` should be a tensor or list of tensors representing
 * the initial state of the RNN layer.
 *
 * You can specify the initial state of RNN layers numerically by
 * calling `resetStates` with the argument `states`. The value of
 * `states` should be a tensor or an Array of tensors representing
 * the initial state of the RNN layer.
 *
 * Note on passing external constants to RNNs:
 * You can pass "external" constants to the cell using the `constants`
 * keyword argument of the `RNN.call` method. This requires that the
 * `cell.call` method accepts the same keyword argument `constants`. Such
 * constants can be used to condition the cell transformation on additional
 * static inputs (not changing over time), a.k.a. an attention mechanism.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function rnn(args: RNNLayerArgs): RNN;
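/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const cell = tf.layers.lstmCell({units: 4});
 * const rnn = tf.layers.rnn({cell, returnState: true});
 *
 * const input = tf.input({shape: [8, 16]});
 * // With `returnState`, apply() yields [output, ...states].
 * const [output, h, c] = rnn.apply(input);
 * console.log(JSON.stringify(output.shape));  // [null, 4]
 * // `h` and `c` are the LSTM states at the last time step, each [null, 4].
 * ```
 */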
/**
 * Wrapper allowing a stack of RNN cells to behave as a single cell.
 *
 * Used to implement efficient stacked RNNs.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function stackedRNNCells(args: StackedRNNCellsArgs): StackedRNNCells;
/** @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'} */
export declare function bidirectional(args: BidirectionalLayerArgs): Bidirectional;
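/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.bidirectional({
 *   layer: tf.layers.lstm({units: 8, returnSequences: true}),
 *   mergeMode: 'concat',
 *   inputShape: [10, 16],
 * }));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 10, 16]: the 8 forward and 8 backward units, concatenated.
 * ```
 */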
/**
 * This wrapper applies a layer to every temporal slice of an input.
 *
 * The input should be at least 3D, and the dimension of the index `1` will be
 * considered to be the temporal dimension.
 *
 * Consider a batch of 32 samples, where each sample is a sequence of 10 vectors
 * of 16 dimensions. The batch input shape of the layer is then `[32, 10,
 * 16]`, and the `inputShape`, not including the sample dimension, is
 * `[10, 16]`.
 *
 * You can then use `TimeDistributed` to apply a `Dense` layer to each of the 10
 * timesteps, independently:
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.timeDistributed({
 * layer: tf.layers.dense({units: 8}),
 * inputShape: [10, 16],
 * }));
 *
 * // Now model.outputShape = [null, 10, 8].
 * // The output will then have shape `[32, 10, 8]`.
 *
 * // In subsequent layers, there is no need for `inputShape`:
 * model.add(tf.layers.timeDistributed({layer: tf.layers.dense({units: 32})}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // Now model.outputShape = [null, 10, 32].
 * ```
 *
 * The output will then have shape `[32, 10, 32]`.
 *
 * `TimeDistributed` can be used with arbitrary layers, not just `Dense`, for
 * instance a `Conv2D` layer.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.timeDistributed({
 * layer: tf.layers.conv2d({filters: 64, kernelSize: [3, 3]}),
 * inputShape: [10, 299, 299, 3],
 * }));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'}
 */
export declare function timeDistributed(args: WrapperLayerArgs): TimeDistributed;
export declare const globalMaxPool1d: typeof globalMaxPooling1d;
export declare const globalMaxPool2d: typeof globalMaxPooling2d;
export declare const maxPool1d: typeof maxPooling1d;
export declare const maxPool2d: typeof maxPooling2d;
export { Layer, RNN, RNNCell, input };
/**
 * Apply additive zero-centered Gaussian noise.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * This is useful to mitigate overfitting
 * (you could see it as a form of random data augmentation).
 * Gaussian Noise (GN) is a natural choice as corruption process
 * for real valued inputs.
 *
 * Arguments:
 * - `stddev`: float, standard deviation of the noise distribution.
 *
 * Input shape:
 * Arbitrary. Use the keyword argument `inputShape`
 * (tuple of integers, does not include the samples axis)
 * when using this layer as the first layer in a model.
 *
 * Output shape:
 * Same shape as input.
 *
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function gaussianNoise(args: GaussianNoiseArgs): GaussianNoise;
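/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs and that a `training` flag can be
 * passed through `Layer.apply`'s kwargs):
 *
 * ```js
 * const noiseLayer = tf.layers.gaussianNoise({stddev: 0.1});
 * const x = tf.ones([2, 3]);
 * noiseLayer.apply(x).print();                    // Inference: x unchanged.
 * noiseLayer.apply(x, {training: true}).print();  // Noise applied.
 * ```
 */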
/**
 * Apply multiplicative 1-centered Gaussian noise.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * Arguments:
 * - `rate`: float, drop probability (as with `Dropout`).
 * The multiplicative noise will have
 * standard deviation `sqrt(rate / (1 - rate))`.
 *
 * Input shape:
 * Arbitrary. Use the keyword argument `inputShape`
 * (tuple of integers, does not include the samples axis)
 * when using this layer as the first layer in a model.
 *
 * Output shape:
 * Same shape as input.
 *
 * References:
 * - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](
 * http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
 *
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function gaussianDropout(args: GaussianDropoutArgs): GaussianDropout;
/**
 * Applies Alpha Dropout to the input.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * Alpha Dropout is a `Dropout` that keeps the mean and variance of its inputs
 * at their original values, in order to ensure the self-normalizing property
 * even after this dropout.
 * Alpha Dropout fits well to Scaled Exponential Linear Units
 * by randomly setting activations to the negative saturation value.
 *
 * Arguments:
 * - `rate`: float, drop probability (as with `Dropout`).
 * The multiplicative noise will have
 * standard deviation `sqrt(rate / (1 - rate))`.
 * - `noiseShape`: A 1-D `Tensor` of type `int32`, representing the
 * shape for randomly generated keep/drop flags.
 *
 * Input shape:
 * Arbitrary. Use the keyword argument `inputShape`
 * (tuple of integers, does not include the samples axis)
 * when using this layer as the first layer in a model.
 *
 * Output shape:
 * Same shape as input.
 *
 * References:
 * - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
 *
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function alphaDropout(args: AlphaDropoutArgs): AlphaDropout;
/**
 * Masks a sequence by using a mask value to skip timesteps.
 *
 * If all features for a given sample timestep are equal to `maskValue`,
 * then the sample timestep will be masked (skipped) in all downstream layers
 * (as long as they support masking).
 *
 * If any downstream layer does not support masking yet receives such
 * an input mask, an exception will be raised.
 *
 * Arguments:
 * - `maskValue`: Either `null` or the mask value to skip.
 *
 * Input shape:
 * Arbitrary. Use the keyword argument `inputShape`
 * (tuple of integers, does not include the samples axis)
 * when using this layer as the first layer in a model.
 *
 * Output shape:
 * Same shape as input.
 *
 * @doc {heading: 'Layers', subheading: 'Mask', namespace: 'layers'}
 */
export declare function masking(args?: MaskingArgs): Masking;
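/*
 * Example (an added sketch, not part of the upstream typings; assumes the
 * `tf` namespace from @tensorflow/tfjs):
 *
 * ```js
 * const model = tf.sequential();
 * // Timesteps whose features are all 0 are skipped by downstream layers.
 * model.add(tf.layers.masking({maskValue: 0, inputShape: [4, 2]}));
 * model.add(tf.layers.lstm({units: 3}));
 * const x = tf.tensor3d([[[1, 2], [0, 0], [3, 4], [0, 0]]]);
 * console.log(JSON.stringify(model.predict(x).shape));  // [1, 3]
 * ```
 */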