/**
 * @license
 * Copyright 2018 Google LLC
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 * =============================================================================
 */
/// <amd-module name="@tensorflow/tfjs-layers/dist/exports_layers" />
import { InputLayer, InputLayerArgs } from './engine/input_layer';
import { Layer, LayerArgs } from './engine/topology';
import { input } from './exports';
import { ELU, ELULayerArgs, LeakyReLU, LeakyReLULayerArgs, PReLU, PReLULayerArgs, ReLU, ReLULayerArgs, Softmax, SoftmaxLayerArgs, ThresholdedReLU, ThresholdedReLULayerArgs } from './layers/advanced_activations';
import { Conv1D, Conv2D, Conv2DTranspose, Conv3D, ConvLayerArgs, Cropping2D, Cropping2DLayerArgs, SeparableConv2D, SeparableConvLayerArgs, UpSampling2D, UpSampling2DLayerArgs } from './layers/convolutional';
import { DepthwiseConv2D, DepthwiseConv2DLayerArgs } from './layers/convolutional_depthwise';
import { ConvLSTM2D, ConvLSTM2DArgs, ConvLSTM2DCell, ConvLSTM2DCellArgs } from './layers/convolutional_recurrent';
import { Activation, ActivationLayerArgs, Dense, DenseLayerArgs, Dropout, DropoutLayerArgs, Flatten, FlattenLayerArgs, Masking, MaskingArgs, Permute, PermuteLayerArgs, RepeatVector, RepeatVectorLayerArgs, Reshape, ReshapeLayerArgs, SpatialDropout1D, SpatialDropout1DLayerConfig } from './layers/core';
import { Embedding, EmbeddingLayerArgs } from './layers/embeddings';
import { Add, Average, Concatenate, ConcatenateLayerArgs, Dot, DotLayerArgs, Maximum, Minimum, Multiply } from './layers/merge';
import { AlphaDropout, AlphaDropoutArgs, GaussianDropout, GaussianDropoutArgs, GaussianNoise, GaussianNoiseArgs } from './layers/noise';
import { BatchNormalization, BatchNormalizationLayerArgs, LayerNormalization, LayerNormalizationLayerArgs } from './layers/normalization';
import { ZeroPadding2D, ZeroPadding2DLayerArgs } from './layers/padding';
import { AveragePooling1D, AveragePooling2D, AveragePooling3D, GlobalAveragePooling1D, GlobalAveragePooling2D, GlobalMaxPooling1D, GlobalMaxPooling2D, GlobalPooling2DLayerArgs, MaxPooling1D, MaxPooling2D, MaxPooling3D, Pooling1DLayerArgs, Pooling2DLayerArgs, Pooling3DLayerArgs } from './layers/pooling';
import { GRU, GRUCell, GRUCellLayerArgs, GRULayerArgs, LSTM, LSTMCell, LSTMCellLayerArgs, LSTMLayerArgs, RNN, RNNCell, RNNLayerArgs, SimpleRNN, SimpleRNNCell, SimpleRNNCellLayerArgs, SimpleRNNLayerArgs, StackedRNNCells, StackedRNNCellsArgs } from './layers/recurrent';
import { Bidirectional, BidirectionalLayerArgs, TimeDistributed, WrapperLayerArgs } from './layers/wrappers';
import { Rescaling, RescalingArgs } from './layers/preprocessing/image_preprocessing';
import { CenterCrop, CenterCropArgs } from './layers/preprocessing/center_crop';
import { CategoryEncoding, CategoryEncodingArgs } from './layers/preprocessing/category_encoding';
import { Resizing, ResizingArgs } from './layers/preprocessing/image_resizing';
/**
 * An input layer is an entry point into a `tf.LayersModel`.
 *
 * `InputLayer` is generated automatically for `tf.Sequential` models by
 * specifying the `inputShape` or `batchInputShape` for the first layer. It
 * should not be specified explicitly. However, it can be useful sometimes,
 * e.g., when constructing a sequential model from a subset of another
 * sequential model's layers, as the code snippet below shows.
 *
 * ```js
 * // Define a model which simply adds two inputs.
 * const model1 = tf.sequential();
 * model1.add(tf.layers.dense({inputShape: [4], units: 3, activation: 'relu'}));
 * model1.add(tf.layers.dense({units: 1, activation: 'sigmoid'}));
 * model1.summary();
 * model1.predict(tf.zeros([1, 4])).print();
 *
 * // Construct another model, reusing the second layer of `model1` while
 * // not using the first layer of `model1`. Note that you cannot add the second
 * // layer of `model1` directly as the first layer of the new sequential model,
 * // because doing so will lead to an error related to the fact that the layer
 * // is not an input layer. Instead, you need to create an `inputLayer` and add
 * // it to the new sequential model before adding the reused layer.
 * const model2 = tf.sequential();
 * // Use an inputShape that matches the input shape of `model1`'s second
 * // layer.
 * model2.add(tf.layers.inputLayer({inputShape: [3]}));
 * model2.add(model1.layers[1]);
 * model2.summary();
 * model2.predict(tf.zeros([1, 3])).print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Inputs', namespace: 'layers'}
 */
export declare function inputLayer(args: InputLayerArgs): InputLayer;
/**
 * Exponential Linear Unit (ELU).
 *
 * It follows:
 * `f(x) = alpha * (exp(x) - 1.) for x < 0`,
 * `f(x) = x for x >= 0`.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * References:
 * - [Fast and Accurate Deep Network Learning by Exponential Linear Units
 * (ELUs)](https://arxiv.org/abs/1511.07289v1)
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function elu(args?: ELULayerArgs): ELU;
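/*
 * A minimal usage sketch (editor's addition, not part of the original docs):
 * applying an ELU layer eagerly to a concrete tensor to illustrate the
 * formula above.
 *
 * ```js
 * const eluLayer = tf.layers.elu({alpha: 1.0});
 * const x = tf.tensor2d([[-2, -1, 0, 1, 2]]);
 * // Negative inputs map to alpha * (exp(x) - 1); non-negative inputs pass
 * // through unchanged.
 * eluLayer.apply(x).print();
 * ```
 */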
/**
 * Rectified Linear Unit activation function.
 *
 * Input shape:
 * Arbitrary. Use the config field `inputShape` (Array of integers, does
 * not include the sample axis) when using this layer as the first layer
 * in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function reLU(args?: ReLULayerArgs): ReLU;
/**
 * Leaky version of a rectified linear unit.
 *
 * It allows a small gradient when the unit is not active:
 * `f(x) = alpha * x for x < 0.`
 * `f(x) = x for x >= 0.`
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function leakyReLU(args?: LeakyReLULayerArgs): LeakyReLU;
/**
 * Parameterized version of a leaky rectified linear unit.
 *
 * It follows
 * `f(x) = alpha * x for x < 0.`
 * `f(x) = x for x >= 0.`
 * wherein `alpha` is a trainable weight.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function prelu(args?: PReLULayerArgs): PReLU;
/**
 * Softmax activation layer.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function softmax(args?: SoftmaxLayerArgs): Softmax;
/**
 * Thresholded Rectified Linear Unit.
 *
 * It follows:
 * `f(x) = x for x > theta`,
 * `f(x) = 0 otherwise`.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * References:
 * - [Zero-Bias Autoencoders and the Benefits of Co-Adapting
 * Features](http://arxiv.org/abs/1402.3337)
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function thresholdedReLU(args?: ThresholdedReLULayerArgs): ThresholdedReLU;
/**
 * 1D convolution layer (e.g., temporal convolution).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input over a single spatial (or temporal) dimension
 * to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model, provide an
 * `inputShape` argument (an `Array` of integers or `null` entries).
 *
 * For example, `inputShape` would be:
 * - `[10, 128]` for sequences of 10 128-dimensional vectors,
 * - `[null, 128]` for variable-length sequences of 128-dimensional vectors.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv1d(args: ConvLayerArgs): Conv1D;
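/*
 * A minimal usage sketch (editor's addition): a 1D convolution over
 * sequences of 10 steps with 128 features each, matching the `inputShape`
 * guidance above.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.conv1d(
 *     {filters: 16, kernelSize: 3, inputShape: [10, 128]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 8, 16]: 10 steps shrink to 8 under the default 'valid'
 * // padding; the last dimension becomes `filters`.
 * ```
 */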
/**
 * 2D convolution layer (e.g. spatial convolution over images).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model,
 * provide the keyword argument `inputShape`
 * (Array of integers, does not include the sample axis),
 * e.g. `inputShape: [128, 128, 3]` for 128x128 RGB pictures
 * in `dataFormat: 'channelsLast'`.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv2d(args: ConvLayerArgs): Conv2D;
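/*
 * A minimal usage sketch (editor's addition): a 2D convolution over
 * single-channel 28x28 images.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.conv2d(
 *     {filters: 16, kernelSize: 3, inputShape: [28, 28, 1]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 26, 26, 16]: each spatial dimension loses kernelSize - 1
 * // under the default 'valid' padding.
 * ```
 */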
/**
 * Transposed convolutional layer (sometimes called Deconvolution).
 *
 * The need for transposed convolutions generally arises
 * from the desire to use a transformation going in the opposite direction of
 * a normal convolution, i.e., from something that has the shape of the output
 * of some convolution to something that has the shape of its input while
 * maintaining a connectivity pattern that is compatible with said
 * convolution.
 *
 * When using this layer as the first layer in a model, provide the
 * configuration `inputShape` (`Array` of integers, does not include the
 * sample axis), e.g., `inputShape: [128, 128, 3]` for 128x128 RGB pictures in
 * `dataFormat: 'channelsLast'`.
 *
 * Input shape:
 * 4D tensor with shape:
 * `[batch, channels, rows, cols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape
 * `[batch, rows, cols, channels]` if `dataFormat` is `'channelsLast'`.
 *
 * Output shape:
 * 4D tensor with shape:
 * `[batch, filters, newRows, newCols]` if `dataFormat` is
 * `'channelsFirst'`, or 4D tensor with shape:
 * `[batch, newRows, newCols, filters]` if `dataFormat` is `'channelsLast'`.
 *
 * References:
 * - [A guide to convolution arithmetic for deep
 * learning](https://arxiv.org/abs/1603.07285v1)
 * - [Deconvolutional
 * Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv2dTranspose(args: ConvLayerArgs): Conv2DTranspose;
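/*
 * A minimal usage sketch (editor's addition): a transposed convolution used
 * to grow a small feature map, the direction opposite to a normal
 * convolution.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.conv2dTranspose(
 *     {filters: 8, kernelSize: 3, strides: 2, inputShape: [4, 4, 16]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 9, 9, 8]: with 'valid' padding the spatial size is
 * // (4 - 1) * 2 + 3 = 9.
 * ```
 */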
/**
 * 3D convolution layer (e.g. spatial convolution over volumes).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model,
 * provide the keyword argument `inputShape`
 * (Array of integers, does not include the sample axis),
 * e.g. `inputShape: [128, 128, 128, 1]` for 128x128x128 grayscale volumes
 * in `dataFormat: 'channelsLast'`.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv3d(args: ConvLayerArgs): Conv3D;
export declare function conv3dTranspose(args: ConvLayerArgs): Layer;
/**
 * Depthwise separable 2D convolution.
 *
 * Separable convolution consists of first performing
 * a depthwise spatial convolution
 * (which acts on each input channel separately)
 * followed by a pointwise convolution which mixes together the resulting
 * output channels. The `depthMultiplier` argument controls how many
 * output channels are generated per input channel in the depthwise step.
 *
 * Intuitively, separable convolutions can be understood as
 * a way to factorize a convolution kernel into two smaller kernels,
 * or as an extreme version of an Inception block.
 *
 * Input shape:
 * 4D tensor with shape:
 * `[batch, channels, rows, cols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape:
 * `[batch, rows, cols, channels]` if `dataFormat` is `'channelsLast'`.
 *
 * Output shape:
 * 4D tensor with shape:
 * `[batch, filters, newRows, newCols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape:
 * `[batch, newRows, newCols, filters]` if `dataFormat` is `'channelsLast'`.
 * `rows` and `cols` values might have changed due to padding.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function separableConv2d(args: SeparableConvLayerArgs): SeparableConv2D;
/**
 * Cropping layer for 2D input (e.g., image).
 *
 * This layer can crop an input
 * at the top, bottom, left and right side of an image tensor.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`.
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, croppedRows, croppedCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, croppedRows, croppedCols]`.
 *
 * Examples:
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.cropping2D({cropping: [[2, 2], [2, 2]],
 *                                 inputShape: [128, 128, 3]}));
 * // Now the output shape is [batch, 124, 124, 3].
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function cropping2D(args: Cropping2DLayerArgs): Cropping2D;
/**
 * Upsampling layer for 2D inputs.
 *
 * Repeats the rows and columns of the data
 * by `size[0]` and `size[1]` respectively.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, upsampledRows, upsampledCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, upsampledRows, upsampledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function upSampling2d(args: UpSampling2DLayerArgs): UpSampling2D;
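/*
 * A minimal usage sketch (editor's addition): rows and columns are each
 * repeated by the corresponding `size` factor.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.upSampling2d({size: [2, 2], inputShape: [4, 4, 3]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 8, 8, 3]
 * ```
 */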
/**
 * Depthwise separable 2D convolution.
 *
 * Depthwise separable convolution consists of performing just the first step
 * of a depthwise spatial convolution (which acts on each input channel
 * separately). The `depthMultiplier` argument controls how many output
 * channels are generated per input channel in the depthwise step.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function depthwiseConv2d(args: DepthwiseConv2DLayerArgs): DepthwiseConv2D;
/**
 * Applies an activation function to an output.
 *
 * This layer applies an element-wise activation function. Other layers,
 * notably `dense`, can also apply activation functions. Use this isolated
 * activation function to extract the values before and after the
 * activation. For instance:
 *
 * ```js
 * const input = tf.input({shape: [5]});
 * const denseLayer = tf.layers.dense({units: 1});
 * const activationLayer = tf.layers.activation({activation: 'relu6'});
 *
 * // Obtain the output symbolic tensors by applying the layers in order.
 * const denseOutput = denseLayer.apply(input);
 * const activationOutput = activationLayer.apply(denseOutput);
 *
 * // Create the model based on the inputs.
 * const model = tf.model({
 *     inputs: input,
 *     outputs: [denseOutput, activationOutput]
 * });
 *
 * // Collect both outputs and print separately.
 * const [denseOut, activationOut] = model.predict(tf.randomNormal([6, 5]));
 * denseOut.print();
 * activationOut.print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function activation(args: ActivationLayerArgs): Activation;
/**
 * Creates a dense (fully connected) layer.
 *
 * This layer implements the operation:
 * `output = activation(dot(input, kernel) + bias)`
 *
 * `activation` is the element-wise activation function
 * passed as the `activation` argument.
 *
 * `kernel` is a weights matrix created by the layer.
 *
 * `bias` is a bias vector created by the layer (only applicable if `useBias`
 * is `true`).
 *
 * **Input shape:**
 *
 * nD `tf.Tensor` with shape: `(batchSize, ..., inputDim)`.
 *
 * The most common situation would be
 * a 2D input with shape `(batchSize, inputDim)`.
 *
 * **Output shape:**
 *
 * nD tensor with shape: `(batchSize, ..., units)`.
 *
 * For instance, for a 2D input with shape `(batchSize, inputDim)`,
 * the output would have shape `(batchSize, units)`.
 *
 * Note: if the input to the layer has a rank greater than 2, then it is
 * flattened prior to the initial dot product with the kernel.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function dense(args: DenseLayerArgs): Dense;
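/*
 * A minimal usage sketch (editor's addition): the output's last dimension
 * becomes `units`, per the shape rules above.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.dense({units: 4, inputShape: [3]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 4]
 * model.predict(tf.ones([2, 3])).print();  // Shape [2, 4].
 * ```
 */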
/**
 * Applies
 * [dropout](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) to
 * the input.
 *
 * Dropout consists of randomly setting a fraction `rate` of input units to 0
 * at each update during training time, which helps prevent overfitting.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function dropout(args: DropoutLayerArgs): Dropout;
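/*
 * A minimal usage sketch (editor's addition): dropout is a no-op at
 * inference time; the `training` kwarg (an assumption here, as accepted by
 * `Layer.apply`) activates it.
 *
 * ```js
 * const dropoutLayer = tf.layers.dropout({rate: 0.5});
 * const x = tf.ones([2, 4]);
 * dropoutLayer.apply(x).print();  // Unchanged at inference time.
 * dropoutLayer.apply(x, {training: true}).print();
 * // ~half the units zeroed; survivors scaled by 1 / (1 - rate).
 * ```
 */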
/**
 * Spatial 1D version of Dropout.
 *
 * This Layer type performs the same function as the Dropout layer, but it
 * drops entire 1D feature maps instead of individual elements. For example,
 * if an input example consists of 3 timesteps and the feature map for each
 * timestep has a size of 4, a `spatialDropout1d` layer may zero out the
 * 1st and 2nd feature channels entirely (across all 3 timesteps) while
 * sparing the remaining channels.
 *
 * If adjacent frames (timesteps) are strongly correlated (as is normally the
 * case in early convolution layers), regular dropout will not regularize the
 * activations and will merely result in an effective learning rate decrease.
 * In this case, `spatialDropout1d` will help promote independence among
 * feature maps and should be used instead.
 *
 * **Arguments:**
 * rate: A floating-point number >= 0 and <= 1. Fraction of the input
 * elements to drop.
 *
 * **Input shape:**
 * 3D tensor with shape `(samples, timesteps, channels)`.
 *
 * **Output shape:**
 * Same as the input shape.
 *
 * References:
 * - [Efficient Object Localization Using Convolutional
 * Networks](https://arxiv.org/abs/1411.4280)
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function spatialDropout1d(args: SpatialDropout1DLayerConfig): SpatialDropout1D;
/**
 * Flattens the input. Does not affect the batch size.
 *
 * A `Flatten` layer flattens each batch in its inputs to 1D (making the
 * output 2D).
 *
 * For example:
 *
 * ```js
 * const input = tf.input({shape: [4, 3]});
 * const flattenLayer = tf.layers.flatten();
 * // Inspect the inferred output shape of the flatten layer, which
 * // equals `[null, 12]`. The 2nd dimension is 4 * 3, i.e., the result of the
 * // flattening. (The 1st dimension is the undetermined batch size.)
 * console.log(JSON.stringify(flattenLayer.apply(input).shape));
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function flatten(args?: FlattenLayerArgs): Flatten;
/**
 * Repeats the input n times in a new dimension.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.repeatVector({n: 4, inputShape: [2]}));
 * const x = tf.tensor2d([[10, 20]]);
 * // Use the model to do inference on a data point the model hasn't seen.
 * model.predict(x).print();
 * // The output shape is now [batch, 4, 2].
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function repeatVector(args: RepeatVectorLayerArgs): RepeatVector;
/**
 * Reshapes an input to a certain shape.
 *
 * ```js
 * const input = tf.input({shape: [4, 3]});
 * const reshapeLayer = tf.layers.reshape({targetShape: [2, 6]});
 * // Inspect the inferred output shape of the Reshape layer, which
 * // equals `[null, 2, 6]`. (The 1st dimension is the undetermined batch
 * // size.)
 * console.log(JSON.stringify(reshapeLayer.apply(input).shape));
 * ```
 *
 * Input shape:
 * Arbitrary, although all dimensions in the input shape must be fixed.
 * Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * [batchSize, targetShape[0], targetShape[1], ...,
 * targetShape[targetShape.length - 1]].
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function reshape(args: ReshapeLayerArgs): Reshape;
/**
 * Permutes the dimensions of the input according to a given pattern.
 *
 * Useful for, e.g., connecting RNNs and convnets together.
 *
 * Example:
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.permute({
 *     dims: [2, 1],
 *     inputShape: [10, 64]
 * }));
 * console.log(model.outputShape);
 * // Now model's output shape is [null, 64, 10], where null is the
 * // unpermuted sample (batch) dimension.
 * ```
 *
 * Input shape:
 * Arbitrary. Use the configuration field `inputShape` when using this
 * layer as the first layer in a model.
 *
 * Output shape:
 * Same rank as the input shape, but with the dimensions re-ordered (i.e.,
 * permuted) according to the `dims` configuration of this layer.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function permute(args: PermuteLayerArgs): Permute;
/**
 * Maps positive integers (indices) into dense vectors of fixed size.
 * E.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
 *
 * **Input shape:** 2D tensor with shape: `[batchSize, sequenceLength]`.
 *
 * **Output shape:** 3D tensor with shape: `[batchSize, sequenceLength,
 * outputDim]`.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function embedding(args: EmbeddingLayerArgs): Embedding;
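/*
 * A minimal usage sketch (editor's addition): an embedding table of 1000
 * indices, each mapped to an 8-dimensional vector.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.embedding(
 *     {inputDim: 1000, outputDim: 8, inputLength: 5}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 5, 8]
 * model.predict(tf.tensor2d([[4, 20, 7, 0, 1]])).print();
 * ```
 */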
/**
 * Layer that performs element-wise addition on an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). The inputs are specified as an
 * `Array` when the `apply` method of the `Add` layer instance is called. For
 * example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const addLayer = tf.layers.add();
 * const sum = addLayer.apply([input1, input2]);
 * console.log(JSON.stringify(sum.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function add(args?: LayerArgs): Add;
/**
 * Layer that performs element-wise averaging on an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const averageLayer = tf.layers.average();
 * const average = averageLayer.apply([input1, input2]);
 * console.log(JSON.stringify(average.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function average(args?: LayerArgs): Average;
/**
 * Layer that concatenates an `Array` of inputs.
 *
 * It takes a list of tensors, all of the same shape except for the
 * concatenation axis, and returns a single tensor, the concatenation
 * of all inputs. For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 3]});
 * const concatLayer = tf.layers.concatenate();
 * const output = concatLayer.apply([input1, input2]);
 * console.log(JSON.stringify(output.shape));
 * // You get [null, 2, 5], with the first dimension as the undetermined batch
 * // dimension. The last dimension (5) is the result of concatenating the
 * // last dimensions of the inputs (2 and 3).
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function concatenate(args?: ConcatenateLayerArgs): Concatenate;
/**
 * Layer that computes the element-wise maximum of an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const maxLayer = tf.layers.maximum();
 * const max = maxLayer.apply([input1, input2]);
 * console.log(JSON.stringify(max.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function maximum(args?: LayerArgs): Maximum;
/**
 * Layer that computes the element-wise minimum of an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const minLayer = tf.layers.minimum();
 * const min = minLayer.apply([input1, input2]);
 * console.log(JSON.stringify(min.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function minimum(args?: LayerArgs): Minimum;
/**
 * Layer that multiplies (element-wise) an `Array` of inputs.
 *
 * It takes as input an Array of tensors, all of the same
 * shape, and returns a single tensor (also of the same shape).
 * For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const input3 = tf.input({shape: [2, 2]});
 * const multiplyLayer = tf.layers.multiply();
 * const product = multiplyLayer.apply([input1, input2, input3]);
 * console.log(product.shape);
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function multiply(args?: LayerArgs): Multiply;
/**
 * Layer that computes a dot product between samples in two tensors.
 *
 * E.g., if applied to a list of two tensors `a` and `b` both of shape
 * `[batchSize, n]`, the output will be a tensor of shape `[batchSize, 1]`,
 * where each entry at index `[i, 0]` will be the dot product between
 * `a[i, :]` and `b[i, :]`.
 *
 * Example:
 *
 * ```js
 * const dotLayer = tf.layers.dot({axes: -1});
 * const x1 = tf.tensor2d([[10, 20], [30, 40]]);
 * const x2 = tf.tensor2d([[-1, -2], [-3, -4]]);
 *
 * // Invoke the layer's apply() method in eager (imperative) mode.
 * const y = dotLayer.apply([x1, x2]);
 * y.print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function dot(args: DotLayerArgs): Dot;
/**
 * Batch normalization layer (Ioffe and Szegedy, 2014).
 *
 * Normalizes the activations of the previous layer at each batch,
 * i.e. applies a transformation that maintains the mean activation
 * close to 0 and the activation standard deviation close to 1.
 *
 * Input shape:
 * Arbitrary. Use the keyword argument `inputShape` (Array of integers, does
 * not include the sample axis) when calling the constructor of this class,
 * if this layer is used as a first layer in a model.
 *
 * Output shape:
 * Same shape as input.
 *
 * References:
 * - [Batch Normalization: Accelerating Deep Network Training by Reducing
 * Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
 *
 * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'}
 */
export declare function batchNormalization(args?: BatchNormalizationLayerArgs): BatchNormalization;
/**
 * Layer-normalization layer (Ba et al., 2016).
 *
 * Normalizes the activations of the previous layer for each given example in
 * a batch independently, instead of across a batch like in
 * `batchNormalization`. In other words, this layer applies a transformation
 * that maintains the mean activation within each example close to 0 and
 * activation variance close to 1.
 *
 * Input shape:
 * Arbitrary. Use the argument `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same as input.
 *
 * References:
 * - [Layer Normalization](https://arxiv.org/abs/1607.06450)
 *
 * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'}
 */
export declare function layerNormalization(args?: LayerNormalizationLayerArgs): LayerNormalization;
/**
 * Zero-padding layer for 2D input (e.g., image).
 *
 * This layer can add rows and columns of zeros
 * at the top, bottom, left and right side of an image tensor.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`.
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, paddedRows, paddedCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, paddedRows, paddedCols]`.
 *
 * @doc {heading: 'Layers', subheading: 'Padding', namespace: 'layers'}
 */
export declare function zeroPadding2d(args?: ZeroPadding2DLayerArgs): ZeroPadding2D;
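/*
 * A minimal usage sketch (editor's addition): `padding` given as
 * [[top, bottom], [left, right]] rows/columns of zeros.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.zeroPadding2d(
 *     {padding: [[1, 1], [2, 2]], inputShape: [4, 4, 3]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 6, 8, 3]: 1 + 4 + 1 rows, 2 + 4 + 2 columns.
 * ```
 */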
/**
 * Average pooling operation for temporal data.
 *
 * Input shape: `[batchSize, inLength, channels]`
 *
 * Output shape: `[batchSize, pooledLength, channels]`
 *
 * `tf.layers.avgPool1d` and `tf.layers.avgPooling1d` are aliases.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling1d(args: Pooling1DLayerArgs): AveragePooling1D;
export declare function avgPool1d(args: Pooling1DLayerArgs): AveragePooling1D;
export declare function avgPooling1d(args: Pooling1DLayerArgs): AveragePooling1D;
/**
 * Average pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, rows, cols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, pooledRows, pooledCols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, pooledRows, pooledCols]`
 *
 * `tf.layers.avgPool2d` and `tf.layers.avgPooling2d` are aliases.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling2d(args: Pooling2DLayerArgs): AveragePooling2D;
export declare function avgPool2d(args: Pooling2DLayerArgs): AveragePooling2D;
export declare function avgPooling2d(args: Pooling2DLayerArgs): AveragePooling2D;
/**
 * Average pooling operation for 3D data.
 *
 * Input shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, depths, rows, cols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, depths, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling3d(args: Pooling3DLayerArgs): AveragePooling3D;
export declare function avgPool3d(args: Pooling3DLayerArgs): AveragePooling3D;
export declare function avgPooling3d(args: Pooling3DLayerArgs): AveragePooling3D;
/**
 * Global average pooling operation for temporal data.
 *
 * Input Shape: 3D tensor with shape: `[batchSize, steps, features]`.
 *
 * Output Shape: 2D tensor with shape: `[batchSize, features]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalAveragePooling1d(args?: LayerArgs): GlobalAveragePooling1D;
/**
 * Global average pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat` is `CHANNEL_LAST`:
 * 4D tensor with shape: `[batchSize, rows, cols, channels]`.
 * - If `dataFormat` is `CHANNEL_FIRST`:
 * 4D tensor with shape: `[batchSize, channels, rows, cols]`.
 *
 * Output shape:
 * 2D tensor with shape: `[batchSize, channels]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalAveragePooling2d(args: GlobalPooling2DLayerArgs): GlobalAveragePooling2D;
/**
 * Global max pooling operation for temporal data.
 *
 * Input Shape: 3D tensor with shape: `[batchSize, steps, features]`.
 *
 * Output Shape: 2D tensor with shape: `[batchSize, features]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalMaxPooling1d(args?: LayerArgs): GlobalMaxPooling1D;
/**
 * Global max pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat` is `CHANNEL_LAST`:
 * 4D tensor with shape: `[batchSize, rows, cols, channels]`.
 * - If `dataFormat` is `CHANNEL_FIRST`:
 * 4D tensor with shape: `[batchSize, channels, rows, cols]`.
 *
 * Output shape:
 * 2D tensor with shape: `[batchSize, channels]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalMaxPooling2d(args: GlobalPooling2DLayerArgs): GlobalMaxPooling2D;
/**
 * Max pooling operation for temporal data.
 *
 * Input shape: `[batchSize, inLength, channels]`
 *
 * Output shape: `[batchSize, pooledLength, channels]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling1d(args: Pooling1DLayerArgs): MaxPooling1D;
/**
 * Max pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, rows, cols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, pooledRows, pooledCols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, pooledRows, pooledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling2d(args: Pooling2DLayerArgs): MaxPooling2D;
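/*
 * A minimal usage sketch (editor's addition): a 2x2 max pool halves each
 * spatial dimension (strides default to `poolSize`).
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.maxPooling2d(
 *     {poolSize: [2, 2], inputShape: [8, 8, 3]}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 4, 4, 3]
 * ```
 */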
/**
 * Max pooling operation for 3D data.
 *
 * Input shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, depths, rows, cols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, depths, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling3d(args: Pooling3DLayerArgs): MaxPooling3D;
/**
 * Gated Recurrent Unit - Cho et al. 2014.
 *
 * This is an `RNN` layer consisting of one `GRUCell`. However, unlike
 * the underlying `GRUCell`, the `apply` method of `GRU` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const rnn = tf.layers.gru({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `GRUCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function gru(args: GRULayerArgs): GRU;
/**
 * Cell class for `GRU`.
 *
 * `GRUCell` is distinct from the `RNN` subclass `GRU` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `GRU` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.gruCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `GRUCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an
 * RNN. For example:
 *
 * ```js
 * const cells = [
 *     tf.layers.gruCell({units: 4}),
 *     tf.layers.gruCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `gruCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `GRUCell`, use
 * `tf.layers.gru`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function gruCell(args: GRUCellLayerArgs): GRUCell;
/**
 * Long Short-Term Memory layer - Hochreiter 1997.
 *
 * This is an `RNN` layer consisting of one `LSTMCell`. However, unlike
 * the underlying `LSTMCell`, the `apply` method of `LSTM` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const lstm = tf.layers.lstm({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = lstm.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `LSTMCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function lstm(args: LSTMLayerArgs): LSTM;
/**
 * Cell class for `LSTM`.
 *
 * `LSTMCell` is distinct from the `RNN` subclass `LSTM` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `LSTM` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.lstmCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `LSTMCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an
 * RNN. For example:
 *
 * ```js
 * const cells = [
 *     tf.layers.lstmCell({units: 4}),
 *     tf.layers.lstmCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `lstmCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `LSTMCell`, use
 * `tf.layers.lstm`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function lstmCell(args: LSTMCellLayerArgs): LSTMCell;
/**
 * Fully-connected RNN where the output is to be fed back to input.
 *
 * This is an `RNN` layer consisting of one `SimpleRNNCell`. However, unlike
 * the underlying `SimpleRNNCell`, the `apply` method of `SimpleRNN` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const rnn = tf.layers.simpleRNN({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `SimpleRNNCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function simpleRNN(args: SimpleRNNLayerArgs): SimpleRNN;
/**
 * Cell class for `SimpleRNN`.
 *
 * `SimpleRNNCell` is distinct from the `RNN` subclass `SimpleRNN` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `SimpleRNN` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.simpleRNNCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `SimpleRNNCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an
 * RNN. For example:
 *
 * ```js
 * const cells = [
 *     tf.layers.simpleRNNCell({units: 4}),
 *     tf.layers.simpleRNNCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `SimpleRNNCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `SimpleRNNCell`, use
 * `tf.layers.simpleRNN`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function simpleRNNCell(args: SimpleRNNCellLayerArgs): SimpleRNNCell;
/**
 * Convolutional LSTM layer - Xingjian Shi 2015.
 *
 * This is a `ConvRNN2D` layer consisting of one `ConvLSTM2DCell`. However,
 * unlike the underlying `ConvLSTM2DCell`, the `apply` method of `ConvLSTM2D`
 * operates on a sequence of inputs. The shape of the input (not including the
 * first, batch dimension) needs to be 4-D, with the first dimension being time
 * steps. For example:
 *
 * ```js
 * const filters = 3;
 * const kernelSize = 3;
 *
 * const batchSize = 4;
 * const sequenceLength = 2;
 * const size = 5;
 * const channels = 3;
 *
 * const inputShape = [batchSize, sequenceLength, size, size, channels];
 * const input = tf.ones(inputShape);
 *
 * const layer = tf.layers.convLstm2d({filters, kernelSize});
 *
 * const output = layer.apply(input);
 * ```
 */
/** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */
export declare function convLstm2d(args: ConvLSTM2DArgs): ConvLSTM2D;
/**
 * Cell class for `ConvLSTM2D`.
 *
 * `ConvLSTM2DCell` is distinct from the `ConvRNN2D` subclass `ConvLSTM2D` in
 * that its `call` method takes the input data of only a single time step and
 * returns the cell's output at the time step, while `ConvLSTM2D` takes the
 * input data over a number of time steps. For example:
 *
 * ```js
 * const filters = 3;
 * const kernelSize = 3;
 *
 * const sequenceLength = 1;
 * const size = 5;
 * const channels = 3;
 *
 * const inputShape = [sequenceLength, size, size, channels];
 * const input = tf.ones(inputShape);
 *
 * const cell = tf.layers.convLstm2dCell({filters, kernelSize});
 *
 * cell.build(input.shape);
 *
 * const outputSize = size - kernelSize + 1;
 * const outShape = [sequenceLength, outputSize, outputSize, filters];
 *
 * const initialH = tf.zeros(outShape);
 * const initialC = tf.zeros(outShape);
 *
 * const [o, h, c] = cell.call([input, initialH, initialC], {});
 * ```
 */
/** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */
export declare function convLstm2dCell(args: ConvLSTM2DCellArgs): ConvLSTM2DCell;
/**
 * Base class for recurrent layers.
 *
 * Input shape:
 * 3D tensor with shape `[batchSize, timeSteps, inputDim]`.
 *
 * Output shape:
 * - if `returnState`, an Array of tensors (i.e., `tf.Tensor`s). The first
 * tensor is the output. The remaining tensors are the states at the
 * last time step, each with shape `[batchSize, units]`.
 * - if `returnSequences`, the output will have shape
 * `[batchSize, timeSteps, units]`.
 * - else, the output will have shape `[batchSize, units]`.
 *
 * Masking:
 * This layer supports masking for input data with a variable number
 * of timesteps. To introduce masks to your data,
 * use an embedding layer with the `maskZero` parameter
 * set to `true`.
 *
 * Notes on using statefulness in RNNs:
 * You can set RNN layers to be 'stateful', which means that the states
 * computed for the samples in one batch will be reused as initial states
 * for the samples in the next batch. This assumes a one-to-one mapping
 * between samples in different successive batches.
 *
 * To enable statefulness:
 * - specify `stateful: true` in the layer constructor.
 * - specify a fixed batch size for your model:
 * for a sequential model, pass `batchInputShape: [...]` to the first
 * layer in your model;
 * for a functional model with one or more `Input` layers, pass
 * `batchShape: [...]` to all the first layers in your model.
 * This is the expected shape of your inputs *including the batch size*.
 * It should be an Array of integers, e.g., `[32, 10, 100]`.
 * - specify `shuffle: false` when calling `fit()`.
 *
 * To reset the states of your model, call `resetStates()` on either
 * a specific layer, or on your entire model.
 *
 * Note on specifying the initial state of RNNs:
 * You can specify the initial state of RNN layers symbolically by
 * calling them with the option `initialState`. The value of
 * `initialState` should be a tensor or list of tensors representing
 * the initial state of the RNN layer.
 *
 * You can specify the initial state of RNN layers numerically by
 * calling `resetStates` with the argument `states`. The value of
 * `states` should be a `tf.Tensor` or an `Array` of `tf.Tensor`s
 * representing the initial state of the RNN layer.
 *
 * Note on passing external constants to RNNs:
 * You can pass "external" constants to the cell using the `constants`
 * keyword argument of the `RNN.call` method. This requires that the
 * `cell.call` method accepts the same keyword argument `constants`. Such
 * constants can be used to condition the cell transformation on additional
 * static inputs (not changing over time), a.k.a. an attention mechanism.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function rnn(args: RNNLayerArgs): RNN;
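/*
 * A minimal usage sketch (editor's addition): wrapping a single cell in an
 * `RNN` layer so it runs over a sequence of time steps.
 *
 * ```js
 * const cell = tf.layers.simpleRNNCell({units: 8});
 * const rnnLayer = tf.layers.rnn({cell, returnSequences: true});
 * const input = tf.input({shape: [10, 20]});
 * const output = rnnLayer.apply(input);
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]
 * ```
 */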
/**
 * Wrapper allowing a stack of RNN cells to behave as a single cell.
 *
 * Used to implement efficient stacked RNNs.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function stackedRNNCells(args: StackedRNNCellsArgs): StackedRNNCells;
/** @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'} */
export declare function bidirectional(args: BidirectionalLayerArgs): Bidirectional;
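/*
 * A minimal usage sketch (editor's addition): `bidirectional` wraps an RNN
 * layer and runs it forwards and backwards over the sequence; with the
 * (assumed default) `mergeMode: 'concat'`, the two outputs are concatenated
 * along the feature axis.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.bidirectional({
 *     layer: tf.layers.lstm({units: 8, returnSequences: true}),
 *     inputShape: [10, 16],
 * }));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // [null, 10, 16]: 8 forward units concatenated with 8 backward units.
 * ```
 */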
/**
 * This wrapper applies a layer to every temporal slice of an input.
 *
 * The input should be at least 3D, and the dimension of the index `1` will
 * be considered to be the temporal dimension.
 *
 * Consider a batch of 32 samples, where each sample is a sequence of 10
 * vectors of 16 dimensions. The batch input shape of the layer is then
 * `[32, 10, 16]`, and the `inputShape`, not including the sample dimension,
 * is `[10, 16]`.
 *
 * You can then use `TimeDistributed` to apply a `Dense` layer to each of the
 * 10 timesteps, independently:
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.timeDistributed({
 *     layer: tf.layers.dense({units: 8}),
 *     inputShape: [10, 16],
 * }));
 *
 * // Now model.outputShape = [null, 10, 8].
 * // The output will then have shape `[32, 10, 8]`.
 *
 * // In subsequent layers, there is no need for `inputShape`:
 * model.add(tf.layers.timeDistributed({layer: tf.layers.dense({units: 32})}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // Now model.outputShape = [null, 10, 32].
 * ```
 *
 * The output will then have shape `[32, 10, 32]`.
 *
 * `TimeDistributed` can be used with arbitrary layers, not just `Dense`, for
 * instance a `Conv2D` layer.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.timeDistributed({
 *     layer: tf.layers.conv2d({filters: 64, kernelSize: [3, 3]}),
 *     inputShape: [10, 299, 299, 3],
 * }));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'}
 */
export declare function timeDistributed(args: WrapperLayerArgs): TimeDistributed;
export declare const globalMaxPool1d: typeof globalMaxPooling1d;
export declare const globalMaxPool2d: typeof globalMaxPooling2d;
export declare const maxPool1d: typeof maxPooling1d;
export declare const maxPool2d: typeof maxPooling2d;
export { Layer, RNN, RNNCell, input };
/**
 * Apply additive zero-centered Gaussian noise.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * This is useful to mitigate overfitting
 * (you could see it as a form of random data augmentation).
 * Gaussian Noise (GN) is a natural choice as a corruption process
 * for real-valued inputs.
 *
 * Arguments:
 * - `stddev`: float, standard deviation of the noise distribution.
 *
 * Input shape:
 * Arbitrary. Use the keyword argument `inputShape`
 * (Array of integers, does not include the samples axis)
 * when using this layer as the first layer in a model.
 *
 * Output shape:
 * Same shape as input.
 *
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function gaussianNoise(args: GaussianNoiseArgs): GaussianNoise;
/**
 * Apply multiplicative 1-centered Gaussian noise.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * Arguments:
 *   - `rate`: float, drop probability (as with `Dropout`).
 *     The multiplicative noise will have
 *     standard deviation `sqrt(rate / (1 - rate))`.
 *
 * Input shape:
 *   Arbitrary. Use the keyword argument `inputShape`
 *   (tuple of integers, does not include the samples axis)
 *   when using this layer as the first layer in a model.
 *
 * Output shape:
 *   Same shape as input.
 *
 * References:
 *   - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](
 *     http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
 *
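 * A minimal usage sketch (the `rate` value is illustrative):
 *
 * ```js
 * const layer = tf.layers.gaussianDropout({rate: 0.25});
 * const x = tf.ones([2, 4]);
 * // Identity at inference time; multiplicative noise at training time.
 * layer.apply(x).print();
 * layer.apply(x, {training: true}).print();
 * ```
 *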
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function gaussianDropout(args: GaussianDropoutArgs): GaussianDropout;
/**
 * Applies Alpha Dropout to the input.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * Alpha Dropout is a `Dropout` that keeps the mean and variance of the inputs
 * at their original values, in order to ensure the self-normalizing property
 * even after this dropout.
 * Alpha Dropout fits well with Scaled Exponential Linear Units
 * by randomly setting activations to the negative saturation value.
 *
 * Arguments:
 *   - `rate`: float, drop probability (as with `Dropout`).
 *     The multiplicative noise will have
 *     standard deviation `sqrt(rate / (1 - rate))`.
 *   - `noiseShape`: A 1-D `Tensor` of type `int32`, representing the
 *     shape for randomly generated keep/drop flags.
 *
 * Input shape:
 *   Arbitrary. Use the keyword argument `inputShape`
 *   (tuple of integers, does not include the samples axis)
 *   when using this layer as the first layer in a model.
 *
 * Output shape:
 *   Same shape as input.
 *
 * References:
 *   - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
 *
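 * A minimal usage sketch (the `rate` value is illustrative):
 *
 * ```js
 * const layer = tf.layers.alphaDropout({rate: 0.1});
 * const x = tf.randomNormal([2, 4]);
 * // Identity at inference time; at training time, units are dropped to the
 * // negative saturation value while mean and variance are preserved.
 * layer.apply(x, {training: true}).print();
 * ```
 *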
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function alphaDropout(args: AlphaDropoutArgs): AlphaDropout;
/**
 * Masks a sequence by using a mask value to skip timesteps.
 *
 * If all features for a given sample timestep are equal to `maskValue`,
 * then the sample timestep will be masked (skipped) in all downstream layers
 * (as long as they support masking).
 *
 * If any downstream layer does not support masking yet receives such
 * an input mask, an exception will be raised.
 *
 * Arguments:
 *   - `maskValue`: The value to mask (skip). Defaults to `0`.
 *
 * Input shape:
 *   Arbitrary. Use the keyword argument `inputShape`
 *   (tuple of integers, does not include the samples axis)
 *   when using this layer as the first layer in a model.
 *
 * Output shape:
 *   Same shape as input.
 *
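 * For example (a minimal sketch; shapes and values are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.masking({maskValue: 0, inputShape: [4, 2]}));
 * model.add(tf.layers.lstm({units: 3}));
 * // Timesteps whose features are all equal to 0 (the second and fourth
 * // below) are skipped by the downstream LSTM.
 * const x = tf.tensor3d([[[1, 1], [0, 0], [2, 2], [0, 0]]]);
 * model.predict(x).print();
 * ```
 *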
 * @doc {heading: 'Layers', subheading: 'Mask', namespace: 'layers'}
 */
export declare function masking(args?: MaskingArgs): Masking;
/**
 * A preprocessing layer which rescales input values to a new range.
 *
 * This layer rescales every value of an input (often an image) by multiplying
 * by `scale` and adding `offset`.
 *
 * For instance:
 * 1. To rescale an input in the `[0, 255]` range
 *    to be in the `[0, 1]` range, you would pass `scale=1/255`.
 * 2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]`
 *    range, you would pass `scale=1/127.5, offset=-1`.
 *
 * The rescaling is applied both during training and inference. Inputs can be
 * of integer or floating point dtype, and by default the layer will output
 * floats.
 *
 * Arguments:
 *   - `scale`: Float, the scale to apply to the inputs.
 *   - `offset`: Float, the offset to apply to the inputs.
 *
 * Input shape:
 *   Arbitrary.
 *
 * Output shape:
 *   Same as input.
 *
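 * For example (a minimal sketch, using the `[-1, 1]` mapping from above):
 *
 * ```js
 * const rescale = tf.layers.rescaling({scale: 1 / 127.5, offset: -1});
 * const pixels = tf.tensor2d([[0, 127.5, 255]]);
 * rescale.apply(pixels).print();  // [[-1, 0, 1]]
 * ```
 *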
 * @doc {heading: 'Layers', subheading: 'Rescaling', namespace: 'layers'}
 */
export declare function rescaling(args?: RescalingArgs): Rescaling;
/**
 * A preprocessing layer which center crops images.
 *
 * This layer crops the central portion of the images to a target size. If an
 * image is smaller than the target size, it will be resized and cropped so as
 * to return the largest possible window in the image that matches the target
 * aspect ratio.
 *
 * Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
 * of integer or floating point dtype.
 *
 * If the input height/width is even and the target height/width is odd (or
 * vice versa), the input image is left-padded by 1 pixel.
 *
 * Arguments:
 *   - `height`: Integer, the height of the output shape.
 *   - `width`: Integer, the width of the output shape.
 *
 * Input shape:
 *   3D (unbatched) or 4D (batched) tensor with shape:
 *   `(..., height, width, channels)`, in `channelsLast` format.
 *
 * Output shape:
 *   3D (unbatched) or 4D (batched) tensor with shape:
 *   `(..., targetHeight, targetWidth, channels)`.
 *
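 * For example (a minimal sketch; shapes are illustrative):
 *
 * ```js
 * const crop = tf.layers.centerCrop({height: 2, width: 2});
 * const images = tf.randomUniform([1, 4, 4, 3]);  // batched, channelsLast
 * console.log(crop.apply(images).shape);  // [1, 2, 2, 3]
 * ```
 *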
 * @doc {heading: 'Layers', subheading: 'CenterCrop', namespace: 'layers'}
 */
export declare function centerCrop(args?: CenterCropArgs): CenterCrop;
/**
 * A preprocessing layer which resizes images.
 *
 * This layer resizes an image input to a target height and width. The input
 * should be a 4D (batched) or 3D (unbatched) tensor in `channelsLast`
 * format. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0,
 * 255]`) and of integer or floating point dtype. By default, the layer will
 * output floats.
 *
 * Arguments:
 *   - `height`: number, the height for the output tensor.
 *   - `width`: number, the width for the output tensor.
 *   - `interpolation`: string, the method for image resizing interpolation.
 *   - `cropToAspectRatio`: boolean, whether to keep the image aspect ratio.
 *
 * Input shape:
 *   3D (unbatched) or 4D (batched) tensor with shape:
 *   `(..., height, width, channels)`, in `channelsLast` format.
 *
 * Output shape:
 *   3D (unbatched) or 4D (batched) tensor with shape:
 *   `(..., targetHeight, targetWidth, channels)`.
 *
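 * For example (a minimal sketch; shapes are illustrative):
 *
 * ```js
 * const resize = tf.layers.resizing({height: 8, width: 8});
 * const images = tf.randomUniform([1, 32, 32, 3]);  // batched, channelsLast
 * console.log(resize.apply(images).shape);  // [1, 8, 8, 3]
 * ```
 *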
 * @doc {heading: 'Layers', subheading: 'Resizing', namespace: 'layers'}
 */
export declare function resizing(args?: ResizingArgs): Resizing;
/**
 * A preprocessing layer which encodes integer features.
 *
 * This layer provides options for condensing data into a categorical encoding
 * when the total number of tokens is known in advance. It accepts integer
 * values as inputs, and it outputs a dense representation of those
 * inputs.
 *
 * Arguments:
 *
 *   - `numTokens`: The total number of tokens the layer should support. All
 *     inputs to the layer must be integers in the range `0 <= value <
 *     numTokens`, or an error will be thrown.
 *
 *   - `outputMode`: Specification for the output of the layer.
 *     Defaults to `multiHot`. Values can be `oneHot`, `multiHot` or
 *     `count`, configuring the layer as follows:
 *
 *     - `oneHot`: Encodes each individual element in the input into an
 *       array of `numTokens` size, containing a 1 at the element index. If
 *       the last dimension is size 1, will encode on that dimension. If the
 *       last dimension is not size 1, will append a new dimension for the
 *       encoded output.
 *
 *     - `multiHot`: Encodes each sample in the input into a single array
 *       of `numTokens` size, containing a 1 for each vocabulary term
 *       present in the sample. Treats the last dimension as the sample
 *       dimension; if the input shape is `(..., sampleLength)`, the output
 *       shape will be `(..., numTokens)`.
 *
 *     - `count`: Like `multiHot`, but the int array contains a count of
 *       the number of times the token at that index appeared in the sample.
 *
 *   For all output modes, only outputs up to rank 2 are currently supported.
 *
 * Call arguments:
 *   - `inputs`: A 1D or 2D tensor of integer inputs.
 *   - `countWeights`: A tensor in the same shape as `inputs` indicating the
 *     weight for each sample value when summing up in `count` mode. Not used
 *     in `multiHot` or `oneHot` modes.
 *
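 * For example (a minimal sketch; the token values are illustrative):
 *
 * ```js
 * const layer = tf.layers.categoryEncoding(
 *     {numTokens: 4, outputMode: 'multiHot'});
 * const x = tf.tensor2d([[0, 1, 1], [2, 3, 3]], [2, 3], 'int32');
 * layer.apply(x).print();  // [[1, 1, 0, 0], [0, 0, 1, 1]]
 * ```
 *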
 * @doc {heading: 'Layers', subheading: 'CategoryEncoding', namespace: 'layers'}
 */
export declare function categoryEncoding(args: CategoryEncodingArgs): CategoryEncoding;