/**
 * @license
 * Copyright 2018 Google LLC
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 * =============================================================================
 */
/// <amd-module name="@tensorflow/tfjs-layers/dist/exports_layers" />
import { InputLayer, InputLayerArgs } from './engine/input_layer';
import { Layer, LayerArgs } from './engine/topology';
import { input } from './exports';
import { ELU, ELULayerArgs, LeakyReLU, LeakyReLULayerArgs, PReLU, PReLULayerArgs, ReLU, ReLULayerArgs, Softmax, SoftmaxLayerArgs, ThresholdedReLU, ThresholdedReLULayerArgs } from './layers/advanced_activations';
import { Conv1D, Conv2D, Conv2DTranspose, Conv3D, ConvLayerArgs, Cropping2D, Cropping2DLayerArgs, SeparableConv2D, SeparableConvLayerArgs, UpSampling2D, UpSampling2DLayerArgs } from './layers/convolutional';
import { DepthwiseConv2D, DepthwiseConv2DLayerArgs } from './layers/convolutional_depthwise';
import { ConvLSTM2D, ConvLSTM2DArgs, ConvLSTM2DCell, ConvLSTM2DCellArgs } from './layers/convolutional_recurrent';
import { Activation, ActivationLayerArgs, Dense, DenseLayerArgs, Dropout, DropoutLayerArgs, Flatten, FlattenLayerArgs, Masking, MaskingArgs, Permute, PermuteLayerArgs, RepeatVector, RepeatVectorLayerArgs, Reshape, ReshapeLayerArgs, SpatialDropout1D, SpatialDropout1DLayerConfig } from './layers/core';
import { Embedding, EmbeddingLayerArgs } from './layers/embeddings';
import { Add, Average, Concatenate, ConcatenateLayerArgs, Dot, DotLayerArgs, Maximum, Minimum, Multiply } from './layers/merge';
import { AlphaDropout, AlphaDropoutArgs, GaussianDropout, GaussianDropoutArgs, GaussianNoise, GaussianNoiseArgs } from './layers/noise';
import { BatchNormalization, BatchNormalizationLayerArgs, LayerNormalization, LayerNormalizationLayerArgs } from './layers/normalization';
import { ZeroPadding2D, ZeroPadding2DLayerArgs } from './layers/padding';
import { AveragePooling1D, AveragePooling2D, AveragePooling3D, GlobalAveragePooling1D, GlobalAveragePooling2D, GlobalMaxPooling1D, GlobalMaxPooling2D, GlobalPooling2DLayerArgs, MaxPooling1D, MaxPooling2D, MaxPooling3D, Pooling1DLayerArgs, Pooling2DLayerArgs, Pooling3DLayerArgs } from './layers/pooling';
import { GRU, GRUCell, GRUCellLayerArgs, GRULayerArgs, LSTM, LSTMCell, LSTMCellLayerArgs, LSTMLayerArgs, RNN, RNNCell, RNNLayerArgs, SimpleRNN, SimpleRNNCell, SimpleRNNCellLayerArgs, SimpleRNNLayerArgs, StackedRNNCells, StackedRNNCellsArgs } from './layers/recurrent';
import { Bidirectional, BidirectionalLayerArgs, TimeDistributed, WrapperLayerArgs } from './layers/wrappers';
import { Rescaling, RescalingArgs } from './layers/preprocessing/image_preprocessing';
import { CenterCrop, CenterCropArgs } from './layers/preprocessing/center_crop';
import { CategoryEncoding, CategoryEncodingArgs } from './layers/preprocessing/category_encoding';
import { Resizing, ResizingArgs } from './layers/preprocessing/image_resizing';
import { RandomWidth, RandomWidthArgs } from './layers/preprocessing/random_width';
/**
 * An input layer is an entry point into a `tf.LayersModel`.
 *
 * `InputLayer` is generated automatically for `tf.Sequential` models by
 * specifying the `inputShape` or `batchInputShape` for the first layer. It
 * should not be specified explicitly. However, it can be useful sometimes,
 * e.g., when constructing a sequential model from a subset of another
 * sequential model's layers, as the code snippet below shows.
 *
 * ```js
 * // Define a simple model with two dense layers.
 * const model1 = tf.sequential();
 * model1.add(tf.layers.dense({inputShape: [4], units: 3, activation: 'relu'}));
 * model1.add(tf.layers.dense({units: 1, activation: 'sigmoid'}));
 * model1.summary();
 * model1.predict(tf.zeros([1, 4])).print();
 *
 * // Construct another model, reusing the second layer of `model1` while
 * // not using the first layer of `model1`. Note that you cannot add the second
 * // layer of `model1` directly as the first layer of the new sequential model,
 * // because doing so will lead to an error related to the fact that the layer
 * // is not an input layer. Instead, you need to create an `inputLayer` and add
 * // it to the new sequential model before adding the reused layer.
 * const model2 = tf.sequential();
 * // Use an inputShape that matches the input shape of `model1`'s second
 * // layer.
 * model2.add(tf.layers.inputLayer({inputShape: [3]}));
 * model2.add(model1.layers[1]);
 * model2.summary();
 * model2.predict(tf.zeros([1, 3])).print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Inputs', namespace: 'layers'}
 */
export declare function inputLayer(args: InputLayerArgs): InputLayer;
/**
 * Exponential Linear Unit (ELU).
 *
 * It follows:
 * `f(x) = alpha * (exp(x) - 1.) for x < 0`,
 * `f(x) = x for x >= 0`.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * References:
 * - [Fast and Accurate Deep Network Learning by Exponential Linear Units
 * (ELUs)](https://arxiv.org/abs/1511.07289v1)
 *
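 * For example (a minimal, illustrative sketch; the tensor values are
 * arbitrary):
 *
 * ```js
 * const eluLayer = tf.layers.elu();
 * // Negative inputs are squashed toward -alpha; non-negative inputs pass
 * // through unchanged.
 * eluLayer.apply(tf.tensor2d([[-2, -1, 0, 1, 2]])).print();
 * ```
 *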
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function elu(args?: ELULayerArgs): ELU;
/**
 * Rectified Linear Unit activation function.
 *
 * Input shape:
 * Arbitrary. Use the config field `inputShape` (Array of integers, does
 * not include the sample axis) when using this layer as the first layer
 * in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
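 * For example (a minimal sketch; the optional `maxValue` cap and the values
 * shown are illustrative):
 *
 * ```js
 * const reluLayer = tf.layers.reLU({maxValue: 6});
 * // Negative inputs become 0; positive inputs are clipped at `maxValue`.
 * reluLayer.apply(tf.tensor2d([[-2, 4, 8]])).print();  // [[0, 4, 6]]
 * ```
 *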
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function reLU(args?: ReLULayerArgs): ReLU;
/**
 * Leaky version of a rectified linear unit.
 *
 * It allows a small gradient when the unit is not active:
 * `f(x) = alpha * x for x < 0.`
 * `f(x) = x for x >= 0.`
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
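 * For example (a minimal sketch; the `alpha` value is illustrative):
 *
 * ```js
 * const leakyReluLayer = tf.layers.leakyReLU({alpha: 0.1});
 * // Negative inputs are scaled by `alpha` instead of being zeroed out.
 * leakyReluLayer.apply(tf.tensor2d([[-10, 0, 10]])).print();  // [[-1, 0, 10]]
 * ```
 *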
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function leakyReLU(args?: LeakyReLULayerArgs): LeakyReLU;
/**
 * Parameterized version of a leaky rectified linear unit.
 *
 * It follows
 * `f(x) = alpha * x for x < 0.`
 * `f(x) = x for x >= 0.`
 * wherein `alpha` is a trainable weight.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
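 * For example (a minimal sketch; the layer sizes are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.dense({units: 4, inputShape: [3]}));
 * // Unlike `leakyReLU`, the slope `alpha` here is a trainable weight.
 * model.add(tf.layers.prelu());
 * model.summary();
 * ```
 *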
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function prelu(args?: PReLULayerArgs): PReLU;
/**
 * Softmax activation layer.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
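 * For example (a minimal sketch; the logit values are illustrative):
 *
 * ```js
 * const softmaxLayer = tf.layers.softmax();
 * // Each row of the output is a probability distribution summing to 1.
 * softmaxLayer.apply(tf.tensor2d([[1, 2, 3]])).print();
 * ```
 *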
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function softmax(args?: SoftmaxLayerArgs): Softmax;
/**
 * Thresholded Rectified Linear Unit.
 *
 * It follows:
 * `f(x) = x for x > theta`,
 * `f(x) = 0 otherwise`.
 *
 * Input shape:
 * Arbitrary. Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same shape as the input.
 *
 * References:
 * - [Zero-Bias Autoencoders and the Benefits of Co-Adapting
 * Features](http://arxiv.org/abs/1402.3337)
 *
 * @doc {
 * heading: 'Layers',
 * subheading: 'Advanced Activation',
 * namespace: 'layers'
 * }
 */
export declare function thresholdedReLU(args?: ThresholdedReLULayerArgs): ThresholdedReLU;
/**
 * 1D convolution layer (e.g., temporal convolution).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input over a single spatial (or temporal) dimension
 * to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model, provide an
 * `inputShape` argument (an `Array` of integers or `null` entries).
 *
 * For example, `inputShape` would be:
 * - `[10, 128]` for sequences of 10 vectors, each 128-dimensional;
 * - `[null, 128]` for variable-length sequences of 128-dimensional vectors.
 *
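 * For example (a minimal sketch; the filter count and kernel size are
 * illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.conv1d(
 *     {filters: 8, kernelSize: 3, inputShape: [10, 128]}));
 * // With the default 'valid' padding, 10 steps shrink to 10 - 3 + 1 = 8.
 * console.log(JSON.stringify(model.outputs[0].shape));  // [null, 8, 8]
 * ```
 *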
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv1d(args: ConvLayerArgs): Conv1D;
/**
 * 2D convolution layer (e.g. spatial convolution over images).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model,
 * provide the keyword argument `inputShape`
 * (Array of integers, does not include the sample axis),
 * e.g. `inputShape: [128, 128, 3]` for 128x128 RGB pictures
 * in `dataFormat: 'channelsLast'`.
 *
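 * For example (a minimal sketch; the shapes are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.conv2d(
 *     {filters: 16, kernelSize: 3, inputShape: [28, 28, 1]}));
 * // With the default 'valid' padding, 28x28 shrinks to 26x26.
 * console.log(JSON.stringify(model.outputs[0].shape));  // [null, 26, 26, 16]
 * ```
 *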
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv2d(args: ConvLayerArgs): Conv2D;
/**
 * Transposed convolutional layer (sometimes called Deconvolution).
 *
 * The need for transposed convolutions generally arises
 * from the desire to use a transformation going in the opposite direction of
 * a normal convolution, i.e., from something that has the shape of the output
 * of some convolution to something that has the shape of its input while
 * maintaining a connectivity pattern that is compatible with said
 * convolution.
 *
 * When using this layer as the first layer in a model, provide the
 * configuration `inputShape` (`Array` of integers, does not include the
 * sample axis), e.g., `inputShape: [128, 128, 3]` for 128x128 RGB pictures in
 * `dataFormat: 'channelsLast'`.
 *
 * Input shape:
 * 4D tensor with shape:
 * `[batch, channels, rows, cols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape
 * `[batch, rows, cols, channels]` if `dataFormat` is `'channelsLast'`.
 *
 * Output shape:
 * 4D tensor with shape:
 * `[batch, filters, newRows, newCols]` if `dataFormat` is
 * `'channelsFirst'`, or 4D tensor with shape:
 * `[batch, newRows, newCols, filters]` if `dataFormat` is `'channelsLast'`.
 *
 * References:
 * - [A guide to convolution arithmetic for deep
 * learning](https://arxiv.org/abs/1603.07285v1)
 * - [Deconvolutional
 * Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv2dTranspose(args: ConvLayerArgs): Conv2DTranspose;
/**
 * 3D convolution layer (e.g. spatial convolution over volumes).
 *
 * This layer creates a convolution kernel that is convolved
 * with the layer input to produce a tensor of outputs.
 *
 * If `useBias` is `true`, a bias vector is created and added to the outputs.
 *
 * If `activation` is not `null`, it is applied to the outputs as well.
 *
 * When using this layer as the first layer in a model,
 * provide the keyword argument `inputShape`
 * (Array of integers, does not include the sample axis),
 * e.g. `inputShape: [128, 128, 128, 1]` for 128x128x128 grayscale volumes
 * in `dataFormat: 'channelsLast'`.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function conv3d(args: ConvLayerArgs): Conv3D;
export declare function conv3dTranspose(args: ConvLayerArgs): Layer;
/**
 * Depthwise separable 2D convolution.
 *
 * Separable convolution consists of first performing
 * a depthwise spatial convolution
 * (which acts on each input channel separately)
 * followed by a pointwise convolution which mixes together the resulting
 * output channels. The `depthMultiplier` argument controls how many
 * output channels are generated per input channel in the depthwise step.
 *
 * Intuitively, separable convolutions can be understood as
 * a way to factorize a convolution kernel into two smaller kernels,
 * or as an extreme version of an Inception block.
 *
 * Input shape:
 * 4D tensor with shape:
 * `[batch, channels, rows, cols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape:
 * `[batch, rows, cols, channels]` if `dataFormat` is `'channelsLast'`.
 *
 * Output shape:
 * 4D tensor with shape:
 * `[batch, filters, newRows, newCols]` if `dataFormat` is `'channelsFirst'`,
 * or 4D tensor with shape:
 * `[batch, newRows, newCols, filters]` if `dataFormat` is `'channelsLast'`.
 * `rows` and `cols` values might have changed due to padding.
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function separableConv2d(args: SeparableConvLayerArgs): SeparableConv2D;
/**
 * Cropping layer for 2D input (e.g., image).
 *
 * This layer can crop an input
 * at the top, bottom, left and right side of an image tensor.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`.
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, croppedRows, croppedCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, croppedRows, croppedCols]`.
 *
 * Example:
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.cropping2D({cropping: [[2, 2], [2, 2]],
 *                                 inputShape: [128, 128, 3]}));
 * // Now the output shape is [batch, 124, 124, 3].
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function cropping2D(args: Cropping2DLayerArgs): Cropping2D;
/**
 * Upsampling layer for 2D inputs.
 *
 * Repeats the rows and columns of the data
 * by `size[0]` and `size[1]` respectively.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, upsampledRows, upsampledCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, upsampledRows, upsampledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function upSampling2d(args: UpSampling2DLayerArgs): UpSampling2D;
/**
 * Depthwise separable 2D convolution.
 *
 * Depthwise separable convolution consists of performing just the first step
 * of a depthwise spatial convolution (which acts on each input channel
 * separately). The `depthMultiplier` argument controls how many output
 * channels are generated per input channel in the depthwise step.
 *
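 * For example (a minimal sketch; the shapes and `depthMultiplier` are
 * illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.depthwiseConv2d(
 *     {kernelSize: 3, depthMultiplier: 2, inputShape: [8, 8, 4]}));
 * // 4 input channels x depthMultiplier 2 = 8 output channels.
 * console.log(JSON.stringify(model.outputs[0].shape));  // [null, 6, 6, 8]
 * ```
 *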
 * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}
 */
export declare function depthwiseConv2d(args: DepthwiseConv2DLayerArgs): DepthwiseConv2D;
/**
 * Applies an activation function to an output.
 *
 * This layer applies an element-wise activation function. Other layers,
 * notably `dense`, can also apply activation functions. Use this isolated
 * activation function when you want to extract the values before and after
 * the activation. For instance:
 *
 * ```js
 * const input = tf.input({shape: [5]});
 * const denseLayer = tf.layers.dense({units: 1});
 * const activationLayer = tf.layers.activation({activation: 'relu6'});
 *
 * // Obtain the output symbolic tensors by applying the layers in order.
 * const denseOutput = denseLayer.apply(input);
 * const activationOutput = activationLayer.apply(denseOutput);
 *
 * // Create the model based on the inputs.
 * const model = tf.model({
 *   inputs: input,
 *   outputs: [denseOutput, activationOutput]
 * });
 *
 * // Collect both outputs and print separately.
 * const [denseOut, activationOut] = model.predict(tf.randomNormal([6, 5]));
 * denseOut.print();
 * activationOut.print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function activation(args: ActivationLayerArgs): Activation;
/**
 * Creates a dense (fully connected) layer.
 *
 * This layer implements the operation:
 * `output = activation(dot(input, kernel) + bias)`
 *
 * `activation` is the element-wise activation function
 * passed as the `activation` argument.
 *
 * `kernel` is a weights matrix created by the layer.
 *
 * `bias` is a bias vector created by the layer (only applicable if `useBias`
 * is `true`).
 *
 * **Input shape:**
 *
 * nD `tf.Tensor` with shape: `(batchSize, ..., inputDim)`.
 *
 * The most common situation would be
 * a 2D input with shape `(batchSize, inputDim)`.
 *
 * **Output shape:**
 *
 * nD tensor with shape: `(batchSize, ..., units)`.
 *
 * For instance, for a 2D input with shape `(batchSize, inputDim)`,
 * the output would have shape `(batchSize, units)`.
 *
 * Note: if the input to the layer has a rank greater than 2, then it is
 * flattened prior to the initial dot product with the kernel.
 *
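 * For example (a minimal sketch; the unit counts are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.dense({units: 2, inputShape: [4], activation: 'relu'}));
 * // A [1, 4] input is mapped to a [1, 2] output.
 * model.predict(tf.ones([1, 4])).print();
 * ```
 *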
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function dense(args: DenseLayerArgs): Dense;
/**
 * Applies
 * [dropout](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) to
 * the input.
 *
 * Dropout consists of randomly setting a fraction `rate` of input units to 0
 * at each update during training time, which helps prevent overfitting.
 *
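 * For example (a minimal sketch; it assumes `apply` is invoked eagerly with
 * the `training` flag set, since dropout is a no-op at inference time):
 *
 * ```js
 * const dropoutLayer = tf.layers.dropout({rate: 0.5});
 * // Roughly half the elements are zeroed; the rest are scaled by 1/(1-rate).
 * dropoutLayer.apply(tf.ones([2, 4]), {training: true}).print();
 * ```
 *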
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function dropout(args: DropoutLayerArgs): Dropout;
/**
 * Spatial 1D version of Dropout.
 *
 * This layer type performs the same function as the Dropout layer, but it
 * drops entire 1D feature maps instead of individual elements. For example,
 * if an input example consists of 3 timesteps and the feature map for each
 * timestep has a size of 4, a `spatialDropout1d` layer may zero out the
 * feature maps of the 1st and 2nd timesteps completely while sparing all
 * feature elements of the 3rd timestep.
 *
 * If adjacent frames (timesteps) are strongly correlated (as is normally the
 * case in early convolution layers), regular dropout will not regularize the
 * activations and will merely result in an effective learning-rate decrease.
 * In this case, `spatialDropout1d` will help promote independence among
 * feature maps and should be used instead.
 *
 * **Arguments:**
 * rate: A floating-point number >= 0 and <= 1. Fraction of the input elements
 * to drop.
 *
 * **Input shape:**
 * 3D tensor with shape `(samples, timesteps, channels)`.
 *
 * **Output shape:**
 * Same as the input shape.
 *
 * References:
 * - [Efficient Object Localization Using Convolutional
 * Networks](https://arxiv.org/abs/1411.4280)
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function spatialDropout1d(args: SpatialDropout1DLayerConfig): SpatialDropout1D;
/**
 * Flattens the input. Does not affect the batch size.
 *
 * A `Flatten` layer flattens each sample in its input to 1D (making the
 * output 2D).
 *
 * For example:
 *
 * ```js
 * const input = tf.input({shape: [4, 3]});
 * const flattenLayer = tf.layers.flatten();
 * // Inspect the inferred output shape of the flatten layer, which
 * // equals `[null, 12]`. The 2nd dimension is 4 * 3, i.e., the result of the
 * // flattening. (The 1st dimension is the undetermined batch size.)
 * console.log(JSON.stringify(flattenLayer.apply(input).shape));
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function flatten(args?: FlattenLayerArgs): Flatten;
/**
 * Repeats the input n times in a new dimension.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.repeatVector({n: 4, inputShape: [2]}));
 * const x = tf.tensor2d([[10, 20]]);
 * // Use the model to do inference on a data point the model hasn't seen.
 * model.predict(x).print();
 * // The output shape is now [batch, 4, 2]: the length-2 input vector is
 * // repeated n = 4 times.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function repeatVector(args: RepeatVectorLayerArgs): RepeatVector;
/**
 * Reshapes an input to a certain shape.
 *
 * ```js
 * const input = tf.input({shape: [4, 3]});
 * const reshapeLayer = tf.layers.reshape({targetShape: [2, 6]});
 * // Inspect the inferred output shape of the Reshape layer, which
 * // equals `[null, 2, 6]`. (The 1st dimension is the undetermined batch
 * // size.)
 * console.log(JSON.stringify(reshapeLayer.apply(input).shape));
 * ```
 *
 * Input shape:
 * Arbitrary, although all dimensions in the input shape must be fixed.
 * Use the configuration `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * [batchSize, targetShape[0], targetShape[1], ...,
 * targetShape[targetShape.length - 1]].
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function reshape(args: ReshapeLayerArgs): Reshape;
/**
 * Permutes the dimensions of the input according to a given pattern.
 *
 * Useful for, e.g., connecting RNNs and convnets together.
 *
 * Example:
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.permute({
 *   dims: [2, 1],
 *   inputShape: [10, 64]
 * }));
 * console.log(model.outputShape);
 * // Now model's output shape is [null, 64, 10], where null is the
 * // unpermuted sample (batch) dimension.
 * ```
 *
 * Input shape:
 * Arbitrary. Use the configuration field `inputShape` when using this
 * layer as the first layer in a model.
 *
 * Output shape:
 * Same rank as the input shape, but with the dimensions re-ordered (i.e.,
 * permuted) according to the `dims` configuration of this layer.
 *
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function permute(args: PermuteLayerArgs): Permute;
/**
 * Maps positive integers (indices) into dense vectors of fixed size.
 * E.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
 *
 * **Input shape:** 2D tensor with shape: `[batchSize, sequenceLength]`.
 *
 * **Output shape:** 3D tensor with shape: `[batchSize, sequenceLength,
 * outputDim]`.
 *
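 * For example (a minimal sketch; the vocabulary and embedding sizes are
 * illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.embedding(
 *     {inputDim: 100, outputDim: 8, inputLength: 10}));
 * // Each of the 10 integer indices is mapped to an 8-dimensional vector.
 * console.log(JSON.stringify(model.outputs[0].shape));  // [null, 10, 8]
 * ```
 *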
 * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}
 */
export declare function embedding(args: EmbeddingLayerArgs): Embedding;
/**
 * Layer that performs element-wise addition on an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). The inputs are specified as an
 * `Array` when the `apply` method of the `Add` layer instance is called. For
 * example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const addLayer = tf.layers.add();
 * const sum = addLayer.apply([input1, input2]);
 * console.log(JSON.stringify(sum.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function add(args?: LayerArgs): Add;
/**
 * Layer that performs element-wise averaging on an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const averageLayer = tf.layers.average();
 * const average = averageLayer.apply([input1, input2]);
 * console.log(JSON.stringify(average.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function average(args?: LayerArgs): Average;
/**
 * Layer that concatenates an `Array` of inputs.
 *
 * It takes a list of tensors, all of the same shape except for the
 * concatenation axis, and returns a single tensor, the concatenation
 * of all inputs. For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 3]});
 * const concatLayer = tf.layers.concatenate();
 * const output = concatLayer.apply([input1, input2]);
 * console.log(JSON.stringify(output.shape));
 * // You get [null, 2, 5], with the first dimension as the undetermined batch
 * // dimension. The last dimension (5) is the result of concatenating the
 * // last dimensions of the inputs (2 and 3).
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function concatenate(args?: ConcatenateLayerArgs): Concatenate;
/**
 * Layer that computes the element-wise maximum of an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const maxLayer = tf.layers.maximum();
 * const max = maxLayer.apply([input1, input2]);
 * console.log(JSON.stringify(max.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function maximum(args?: LayerArgs): Maximum;
/**
 * Layer that computes the element-wise minimum of an `Array` of inputs.
 *
 * It takes as input a list of tensors, all of the same shape, and returns a
 * single tensor (also of the same shape). For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const minLayer = tf.layers.minimum();
 * const min = minLayer.apply([input1, input2]);
 * console.log(JSON.stringify(min.shape));
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function minimum(args?: LayerArgs): Minimum;
/**
 * Layer that multiplies (element-wise) an `Array` of inputs.
 *
 * It takes as input an Array of tensors, all of the same
 * shape, and returns a single tensor (also of the same shape).
 * For example:
 *
 * ```js
 * const input1 = tf.input({shape: [2, 2]});
 * const input2 = tf.input({shape: [2, 2]});
 * const input3 = tf.input({shape: [2, 2]});
 * const multiplyLayer = tf.layers.multiply();
 * const product = multiplyLayer.apply([input1, input2, input3]);
 * console.log(product.shape);
 * // You get [null, 2, 2], with the first dimension as the undetermined batch
 * // dimension.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function multiply(args?: LayerArgs): Multiply;
/**
 * Layer that computes a dot product between samples in two tensors.
 *
 * E.g., if applied to a list of two tensors `a` and `b` both of shape
 * `[batchSize, n]`, the output will be a tensor of shape `[batchSize, 1]`,
 * where each entry at index `[i, 0]` will be the dot product between
 * `a[i, :]` and `b[i, :]`.
 *
 * Example:
 *
 * ```js
 * const dotLayer = tf.layers.dot({axes: -1});
 * const x1 = tf.tensor2d([[10, 20], [30, 40]]);
 * const x2 = tf.tensor2d([[-1, -2], [-3, -4]]);
 *
 * // Invoke the layer's apply() method in eager (imperative) mode.
 * const y = dotLayer.apply([x1, x2]);
 * y.print();
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}
 */
export declare function dot(args: DotLayerArgs): Dot;
/**
 * Batch normalization layer (Ioffe and Szegedy, 2015).
 *
 * Normalizes the activations of the previous layer at each batch,
 * i.e., applies a transformation that maintains the mean activation
 * close to 0 and the activation standard deviation close to 1.
 *
 * Input shape:
 * Arbitrary. Use the keyword argument `inputShape` (Array of integers, does
 * not include the sample axis) when calling the constructor of this class,
 * if this layer is used as a first layer in a model.
 *
 * Output shape:
 * Same shape as input.
 *
 * References:
 * - [Batch Normalization: Accelerating Deep Network Training by Reducing
 * Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
 *
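 * For example (a minimal sketch; the layer sizes are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.dense({units: 8, inputShape: [4]}));
 * // Normalizes the dense layer's activations over the batch dimension.
 * model.add(tf.layers.batchNormalization());
 * model.summary();
 * ```
 *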
 * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'}
 */
export declare function batchNormalization(args?: BatchNormalizationLayerArgs): BatchNormalization;
/**
 * Layer-normalization layer (Ba et al., 2016).
 *
 * Normalizes the activations of the previous layer for each given example in
 * a batch independently, instead of across a batch like in
 * `batchNormalization`. In other words, this layer applies a transformation
 * that maintains the mean activation within each example close to 0 and the
 * activation variance close to 1.
 *
 * Input shape:
 * Arbitrary. Use the argument `inputShape` when using this layer as the
 * first layer in a model.
 *
 * Output shape:
 * Same as input.
 *
 * References:
 * - [Layer Normalization](https://arxiv.org/abs/1607.06450)
 *
 * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'}
 */
export declare function layerNormalization(args?: LayerNormalizationLayerArgs): LayerNormalization;
/**
 * Zero-padding layer for 2D input (e.g., image).
 *
 * This layer can add rows and columns of zeros
 * at the top, bottom, left and right side of an image tensor.
 *
 * Input shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, rows, cols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, rows, cols]`.
 *
 * Output shape:
 * 4D tensor with shape:
 * - If `dataFormat` is `"channelsLast"`:
 * `[batch, paddedRows, paddedCols, channels]`
 * - If `dataFormat` is `"channelsFirst"`:
 * `[batch, channels, paddedRows, paddedCols]`.
 *
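 * For example (a minimal sketch; the padding amounts are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.zeroPadding2d(
 *     {padding: [[1, 1], [2, 2]], inputShape: [4, 4, 3]}));
 * // Rows grow by 1 + 1 and columns by 2 + 2.
 * console.log(JSON.stringify(model.outputs[0].shape));  // [null, 6, 8, 3]
 * ```
 *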
 * @doc {heading: 'Layers', subheading: 'Padding', namespace: 'layers'}
 */
export declare function zeroPadding2d(args?: ZeroPadding2DLayerArgs): ZeroPadding2D;
/**
 * Average pooling operation for temporal data.
 *
 * Input shape: `[batchSize, inLength, channels]`
 *
 * Output shape: `[batchSize, pooledLength, channels]`
 *
 * `tf.avgPool1d` is an alias.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling1d(args: Pooling1DLayerArgs): AveragePooling1D;
export declare function avgPool1d(args: Pooling1DLayerArgs): AveragePooling1D;
export declare function avgPooling1d(args: Pooling1DLayerArgs): AveragePooling1D;
/**
 * Average pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, rows, cols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, pooledRows, pooledCols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, pooledRows, pooledCols]`
 *
 * `tf.avgPool2d` is an alias.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling2d(args: Pooling2DLayerArgs): AveragePooling2D;
export declare function avgPool2d(args: Pooling2DLayerArgs): AveragePooling2D;
export declare function avgPooling2d(args: Pooling2DLayerArgs): AveragePooling2D;
/**
 * Average pooling operation for 3D data.
 *
 * Input shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, depths, rows, cols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, depths, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function averagePooling3d(args: Pooling3DLayerArgs): AveragePooling3D;
export declare function avgPool3d(args: Pooling3DLayerArgs): AveragePooling3D;
export declare function avgPooling3d(args: Pooling3DLayerArgs): AveragePooling3D;
/**
 * Global average pooling operation for temporal data.
 *
 * Input Shape: 3D tensor with shape: `[batchSize, steps, features]`.
 *
 * Output Shape: 2D tensor with shape: `[batchSize, features]`.
 *
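 * For example (a minimal sketch; the shapes are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.globalAveragePooling1d({inputShape: [10, 4]}));
 * // The 10 time steps are averaged away, leaving one value per feature.
 * console.log(JSON.stringify(model.outputs[0].shape));  // [null, 4]
 * ```
 *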
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalAveragePooling1d(args?: LayerArgs): GlobalAveragePooling1D;
/**
 * Global average pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat` is `CHANNEL_LAST`:
 * 4D tensor with shape: `[batchSize, rows, cols, channels]`.
 * - If `dataFormat` is `CHANNEL_FIRST`:
 * 4D tensor with shape: `[batchSize, channels, rows, cols]`.
 *
 * Output shape:
 * 2D tensor with shape: `[batchSize, channels]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalAveragePooling2d(args: GlobalPooling2DLayerArgs): GlobalAveragePooling2D;
/**
 * Global max pooling operation for temporal data.
 *
 * Input Shape: 3D tensor with shape: `[batchSize, steps, features]`.
 *
 * Output Shape: 2D tensor with shape: `[batchSize, features]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalMaxPooling1d(args?: LayerArgs): GlobalMaxPooling1D;
/**
 * Global max pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat` is `CHANNEL_LAST`:
 * 4D tensor with shape: `[batchSize, rows, cols, channels]`.
 * - If `dataFormat` is `CHANNEL_FIRST`:
 * 4D tensor with shape: `[batchSize, channels, rows, cols]`.
 *
 * Output shape:
 * 2D tensor with shape: `[batchSize, channels]`.
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function globalMaxPooling2d(args: GlobalPooling2DLayerArgs): GlobalMaxPooling2D;
/**
 * Max pooling operation for temporal data.
 *
 * Input shape: `[batchSize, inLength, channels]`
 *
 * Output shape: `[batchSize, pooledLength, channels]`
 *
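 * For example (a minimal sketch; the pool size and shapes are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.maxPooling1d({poolSize: 2, inputShape: [8, 3]}));
 * // Non-overlapping windows of 2 halve the temporal dimension.
 * console.log(JSON.stringify(model.outputs[0].shape));  // [null, 4, 3]
 * ```
 *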
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling1d(args: Pooling1DLayerArgs): MaxPooling1D;
/**
 * Max pooling operation for spatial data.
 *
 * Input shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, rows, cols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === CHANNEL_LAST`:
 * 4D tensor with shape:
 * `[batchSize, pooledRows, pooledCols, channels]`
 * - If `dataFormat === CHANNEL_FIRST`:
 * 4D tensor with shape:
 * `[batchSize, channels, pooledRows, pooledCols]`
 *
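 * For example (a minimal sketch; the pool size and shapes are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.maxPooling2d({poolSize: [2, 2], inputShape: [4, 4, 1]}));
 * // Each non-overlapping 2x2 window is reduced to its maximum.
 * console.log(JSON.stringify(model.outputs[0].shape));  // [null, 2, 2, 1]
 * ```
 *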
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling2d(args: Pooling2DLayerArgs): MaxPooling2D;
/**
 * Max pooling operation for 3D data.
 *
 * Input shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, depths, rows, cols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, depths, rows, cols]`
 *
 * Output shape:
 * - If `dataFormat === channelsLast`:
 * 5D tensor with shape:
 * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]`
 * - If `dataFormat === channelsFirst`:
 * 5D tensor with shape:
 * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]`
 *
 * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}
 */
export declare function maxPooling3d(args: Pooling3DLayerArgs): MaxPooling3D;
/**
 * Gated Recurrent Unit - Cho et al. 2014.
 *
 * This is an `RNN` layer consisting of one `GRUCell`. However, unlike
 * the underlying `GRUCell`, the `apply` method of `GRU` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const rnn = tf.layers.gru({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `GRUCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function gru(args: GRULayerArgs): GRU;
/**
 * Cell class for `GRU`.
 *
 * `GRUCell` is distinct from the `RNN` subclass `GRU` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `GRU` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.gruCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `GRUCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCells` internally) and use it to create
 * an RNN. For example:
 *
 * ```js
 * const cells = [
 *   tf.layers.gruCell({units: 4}),
 *   tf.layers.gruCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `gruCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `GRUCell`, use
 * `tf.layers.gru`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function gruCell(args: GRUCellLayerArgs): GRUCell;
/**
 * Long Short-Term Memory layer - Hochreiter and Schmidhuber, 1997.
 *
 * This is an `RNN` layer consisting of one `LSTMCell`. However, unlike
 * the underlying `LSTMCell`, the `apply` method of `LSTM` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const lstm = tf.layers.lstm({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = lstm.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `LSTMCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function lstm(args: LSTMLayerArgs): LSTM;
/**
 * Cell class for `LSTM`.
 *
 * `LSTMCell` is distinct from the `RNN` subclass `LSTM` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `LSTM` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.lstmCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `LSTMCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCells` internally) and use it to create
 * an RNN. For example:
 *
 * ```js
 * const cells = [
 *   tf.layers.lstmCell({units: 4}),
 *   tf.layers.lstmCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `lstmCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `LSTMCell`, use
 * `tf.layers.lstm`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function lstmCell(args: LSTMCellLayerArgs): LSTMCell;
/**
 * Fully-connected RNN where the output is to be fed back to input.
 *
 * This is an `RNN` layer consisting of one `SimpleRNNCell`. However, unlike
 * the underlying `SimpleRNNCell`, the `apply` method of `SimpleRNN` operates
 * on a sequence of inputs. The shape of the input (not including the first,
 * batch dimension) needs to be at least 2-D, with the first dimension being
 * time steps. For example:
 *
 * ```js
 * const rnn = tf.layers.simpleRNN({units: 8, returnSequences: true});
 *
 * // Create an input with 10 time steps.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the `SimpleRNNCell`'s number of units.
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function simpleRNN(args: SimpleRNNLayerArgs): SimpleRNN;
/**
 * Cell class for `SimpleRNN`.
 *
 * `SimpleRNNCell` is distinct from the `RNN` subclass `SimpleRNN` in that its
 * `apply` method takes the input data of only a single time step and returns
 * the cell's output at the time step, while `SimpleRNN` takes the input data
 * over a number of time steps. For example:
 *
 * ```js
 * const cell = tf.layers.simpleRNNCell({units: 2});
 * const input = tf.input({shape: [10]});
 * const output = cell.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10]: This is the cell's output at a single time step. The 1st
 * // dimension is the unknown batch size.
 * ```
 *
 * Instance(s) of `SimpleRNNCell` can be used to construct `RNN` layers. The
 * most typical use of this workflow is to combine a number of cells into a
 * stacked RNN cell (i.e., `StackedRNNCells` internally) and use it to create
 * an RNN. For example:
 *
 * ```js
 * const cells = [
 *   tf.layers.simpleRNNCell({units: 4}),
 *   tf.layers.simpleRNNCell({units: 8}),
 * ];
 * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});
 *
 * // Create an input with 10 time steps and a length-20 vector at each step.
 * const input = tf.input({shape: [10, 20]});
 * const output = rnn.apply(input);
 *
 * console.log(JSON.stringify(output.shape));
 * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the
 * // same as the sequence length of `input`, due to `returnSequences`: `true`;
 * // 3rd dimension is the last `SimpleRNNCell`'s number of units.
 * ```
 *
 * To create an `RNN` consisting of only *one* `SimpleRNNCell`, use
 * `tf.layers.simpleRNN`.
 *
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function simpleRNNCell(args: SimpleRNNCellLayerArgs): SimpleRNNCell;
/**
 * Convolutional LSTM layer - Xingjian Shi 2015.
 *
 * This is a `ConvRNN2D` layer consisting of one `ConvLSTM2DCell`. However,
 * unlike the underlying `ConvLSTM2DCell`, the `apply` method of `ConvLSTM2D`
 * operates on a sequence of inputs. The shape of the input (not including the
 * first, batch dimension) needs to be 4-D, with the first dimension being time
 * steps. For example:
 *
 * ```js
 * const filters = 3;
 * const kernelSize = 3;
 *
 * const batchSize = 4;
 * const sequenceLength = 2;
 * const size = 5;
 * const channels = 3;
 *
 * const inputShape = [batchSize, sequenceLength, size, size, channels];
 * const input = tf.ones(inputShape);
 *
 * const layer = tf.layers.convLstm2d({filters, kernelSize});
 *
 * const output = layer.apply(input);
 * ```
 */
/** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */
export declare function convLstm2d(args: ConvLSTM2DArgs): ConvLSTM2D;
/**
 * Cell class for `ConvLSTM2D`.
 *
 * `ConvLSTM2DCell` is distinct from the `ConvRNN2D` subclass `ConvLSTM2D` in
 * that its `call` method takes the input data of only a single time step and
 * returns the cell's output at the time step, while `ConvLSTM2D` takes the
 * input data over a number of time steps. For example:
 *
 * ```js
 * const filters = 3;
 * const kernelSize = 3;
 *
 * const sequenceLength = 1;
 * const size = 5;
 * const channels = 3;
 *
 * const inputShape = [sequenceLength, size, size, channels];
 * const input = tf.ones(inputShape);
 *
 * const cell = tf.layers.convLstm2dCell({filters, kernelSize});
 *
 * cell.build(input.shape);
 *
 * const outputSize = size - kernelSize + 1;
 * const outShape = [sequenceLength, outputSize, outputSize, filters];
 *
 * const initialH = tf.zeros(outShape);
 * const initialC = tf.zeros(outShape);
 *
 * const [o, h, c] = cell.call([input, initialH, initialC], {});
 * ```
 */
/** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */
export declare function convLstm2dCell(args: ConvLSTM2DCellArgs): ConvLSTM2DCell;
/**
 * Base class for recurrent layers.
 *
 * Input shape:
 *   3D tensor with shape `[batchSize, timeSteps, inputDim]`.
 *
 * Output shape:
 *   - if `returnState`, an Array of tensors (i.e., `tf.Tensor`s). The first
 *     tensor is the output. The remaining tensors are the states at the
 *     last time step, each with shape `[batchSize, units]`.
 *   - if `returnSequences`, the output will have shape
 *     `[batchSize, timeSteps, units]`.
 *   - else, the output will have shape `[batchSize, units]`.
 *
 * Masking:
 *   This layer supports masking for input data with a variable number
 *   of timesteps. To introduce masks to your data, use an embedding layer
 *   with the `maskZero` field set to `true`.
 *
 * Notes on using statefulness in RNNs:
 *   You can set RNN layers to be 'stateful', which means that the states
 *   computed for the samples in one batch will be reused as initial states
 *   for the samples in the next batch. This assumes a one-to-one mapping
 *   between samples in different successive batches.
 *
 *   To enable statefulness:
 *     - Specify `stateful: true` in the layer constructor.
 *     - Specify a fixed batch size for your model:
 *       - for a sequential model, pass `batchInputShape: [...]` to the first
 *         layer of your model;
 *       - for a functional model with one or more `Input` layers, pass
 *         `batchShape: [...]` to all of those first layers.
 *       This is the expected shape of your inputs *including the batch
 *       size*. It should be an Array of integers, e.g., `[32, 10, 100]`.
 *     - Specify `shuffle: false` when calling `fit()`.
 *
 *   To reset the states of your model, call `.resetStates()` on either
 *   a specific layer or on your entire model.
 *
 * Note on specifying the initial state of RNNs:
 *   You can specify the initial state of RNN layers symbolically by
 *   calling them with the option `initialState`. The value of
 *   `initialState` should be a `tf.Tensor` or an Array of `tf.Tensor`s
 *   representing the initial state of the RNN layer.
 *
 *   You can specify the initial state of RNN layers numerically by
 *   calling `resetStates` with a `states` argument. The value of
 *   `states` should be a `tf.Tensor` or an Array of `tf.Tensor`s
 *   representing the initial state of the RNN layer.
 *
 * Note on passing external constants to RNNs:
 *   You can pass "external" constants to the cell using the `constants`
 *   keyword argument of the `RNN.call` method. This requires that the
 *   `cell.call` method accept the same keyword argument `constants`. Such
 *   constants can be used to condition the cell transformation on additional
 *   static inputs (not changing over time), a.k.a. an attention mechanism.
 *
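 * A minimal usage sketch (the cell type, unit count, and shapes here are
 * illustrative, not required):
 *
 * ```js
 * const rnnLayer = tf.layers.rnn({
 *   cell: tf.layers.lstmCell({units: 8}),
 *   returnSequences: true,
 * });
 * const input = tf.ones([4, 10, 16]);  // [batchSize, timeSteps, inputDim].
 * const output = rnnLayer.apply(input);
 * console.log(JSON.stringify(output.shape));  // [4, 10, 8].
 * ```
 *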
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function rnn(args: RNNLayerArgs): RNN;
/**
 * Wrapper allowing a stack of RNN cells to behave as a single cell.
 *
 * Used to implement efficient stacked RNNs.
 *
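 * A minimal usage sketch (the unit counts are illustrative):
 *
 * ```js
 * const stackedCell = tf.layers.stackedRNNCells({
 *   cells: [
 *     tf.layers.lstmCell({units: 4}),
 *     tf.layers.lstmCell({units: 8}),
 *   ],
 * });
 * const rnnLayer = tf.layers.rnn({cell: stackedCell});
 * const output = rnnLayer.apply(tf.ones([2, 5, 3]));
 * // The output size comes from the last cell in the stack:
 * console.log(JSON.stringify(output.shape));  // [2, 8].
 * ```
 *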
 * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}
 */
export declare function stackedRNNCells(args: StackedRNNCellsArgs): StackedRNNCells;
/** @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'} */
export declare function bidirectional(args: BidirectionalLayerArgs): Bidirectional;
/**
 * This wrapper applies a layer to every temporal slice of an input.
 *
 * The input should be at least 3D, and the dimension at index `1` will be
 * considered to be the temporal dimension.
 *
 * Consider a batch of 32 samples, where each sample is a sequence of 10
 * vectors of 16 dimensions. The batch input shape of the layer is then
 * `[32, 10, 16]`, and the `inputShape`, not including the sample dimension,
 * is `[10, 16]`.
 *
 * You can then use `TimeDistributed` to apply a `Dense` layer to each of the
 * 10 timesteps, independently:
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.timeDistributed({
 *   layer: tf.layers.dense({units: 8}),
 *   inputShape: [10, 16],
 * }));
 *
 * // Now model.outputShape = [null, 10, 8].
 * // The output will then have shape `[32, 10, 8]`.
 *
 * // In subsequent layers, there is no need for `inputShape`:
 * model.add(tf.layers.timeDistributed({layer: tf.layers.dense({units: 32})}));
 * console.log(JSON.stringify(model.outputs[0].shape));
 * // Now model.outputShape = [null, 10, 32].
 * ```
 *
 * The output will then have shape `[32, 10, 32]`.
 *
 * `TimeDistributed` can be used with arbitrary layers, not just `Dense`, for
 * instance a `Conv2D` layer.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.timeDistributed({
 *   layer: tf.layers.conv2d({filters: 64, kernelSize: [3, 3]}),
 *   inputShape: [10, 299, 299, 3],
 * }));
 * console.log(JSON.stringify(model.outputs[0].shape));
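 * // With the default 'valid' padding, outputs[0].shape is now
 * // [null, 10, 297, 297, 64].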
 * ```
 *
 * @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'}
 */
export declare function timeDistributed(args: WrapperLayerArgs): TimeDistributed;
export declare const globalMaxPool1d: typeof globalMaxPooling1d;
export declare const globalMaxPool2d: typeof globalMaxPooling2d;
export declare const maxPool1d: typeof maxPooling1d;
export declare const maxPool2d: typeof maxPooling2d;
export { Layer, RNN, RNNCell, input };
/**
 * Apply additive zero-centered Gaussian noise.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * This is useful to mitigate overfitting
 * (you could see it as a form of random data augmentation).
 * Gaussian Noise (GN) is a natural choice as a corruption process
 * for real-valued inputs.
 *
 * Arguments:
 *   - `stddev`: float, standard deviation of the noise distribution.
 *
 * Input shape:
 *   Arbitrary. Use the config field `inputShape` (an Array of integers,
 *   which does not include the samples axis) when using this layer as the
 *   first layer in a model.
 *
 * Output shape:
 *   Same shape as input.
 *
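 * A minimal usage sketch (the `training: true` kwarg below is assumed to be
 * the way to activate the noise outside of `fit()`; values are illustrative):
 *
 * ```js
 * const layer = tf.layers.gaussianNoise({stddev: 0.1});
 * const x = tf.ones([2, 3]);
 * // At inference time (no `training` flag) the layer is an identity map.
 * const y = layer.apply(x, {training: true});
 * y.print();  // Each element is roughly 1 plus zero-centered noise.
 * ```
 *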
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function gaussianNoise(args: GaussianNoiseArgs): GaussianNoise;
/**
 * Apply multiplicative 1-centered Gaussian noise.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * Arguments:
 *   - `rate`: float, drop probability (as with `Dropout`).
 *     The multiplicative noise will have
 *     standard deviation `sqrt(rate / (1 - rate))`.
 *
 * Input shape:
 *   Arbitrary. Use the config field `inputShape` (an Array of integers,
 *   which does not include the samples axis) when using this layer as the
 *   first layer in a model.
 *
 * Output shape:
 *   Same shape as input.
 *
 * References:
 *   - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](
 *     http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
 *
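 * A minimal usage sketch (the rate value is illustrative):
 *
 * ```js
 * const layer = tf.layers.gaussianDropout({rate: 0.25});
 * // During training, inputs are multiplied by 1-centered Gaussian noise
 * // with stddev sqrt(0.25 / 0.75) ~= 0.577; at inference time the layer
 * // passes inputs through unchanged.
 * const y = layer.apply(tf.ones([2, 4]), {training: true});
 * ```
 *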
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function gaussianDropout(args: GaussianDropoutArgs): GaussianDropout;
/**
 * Applies Alpha Dropout to the input.
 *
 * As it is a regularization layer, it is only active at training time.
 *
 * Alpha Dropout is a `Dropout` variant that keeps the mean and variance of
 * its inputs at their original values, in order to ensure the
 * self-normalizing property even after this dropout.
 * Alpha Dropout pairs well with Scaled Exponential Linear Units (SELU)
 * by randomly setting activations to the negative saturation value.
 *
 * Arguments:
 *   - `rate`: float, drop probability (as with `Dropout`).
 *     The multiplicative noise will have
 *     standard deviation `sqrt(rate / (1 - rate))`.
 *   - `noiseShape`: A 1-D `Tensor` of type `int32`, representing the
 *     shape for randomly generated keep/drop flags.
 *
 * Input shape:
 *   Arbitrary. Use the config field `inputShape` (an Array of integers,
 *   which does not include the samples axis) when using this layer as the
 *   first layer in a model.
 *
 * Output shape:
 *   Same shape as input.
 *
 * References:
 *   - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
 *
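 * A minimal usage sketch (layer sizes are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.dense({units: 16, activation: 'selu', inputShape: [8]}));
 * // Alpha dropout preserves the self-normalizing property of 'selu'.
 * model.add(tf.layers.alphaDropout({rate: 0.1}));
 * ```
 *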
 * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}
 */
export declare function alphaDropout(args: AlphaDropoutArgs): AlphaDropout;
/**
 * Masks a sequence by using a mask value to skip timesteps.
 *
 * If all features for a given sample timestep are equal to `maskValue`,
 * then the sample timestep will be masked (skipped) in all downstream layers
 * (as long as they support masking).
 *
 * If any downstream layer does not support masking yet receives such
 * an input mask, an exception will be raised.
 *
 * Arguments:
 *   - `maskValue`: The mask value to skip. Defaults to `0`.
 *
 * Input shape:
 *   Arbitrary. Use the config field `inputShape` (an Array of integers,
 *   which does not include the samples axis) when using this layer as the
 *   first layer in a model.
 *
 * Output shape:
 *   Same shape as input.
 *
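 * A minimal usage sketch (shapes are illustrative):
 *
 * ```js
 * const model = tf.sequential();
 * model.add(tf.layers.masking({maskValue: 0, inputShape: [10, 4]}));
 * // Timesteps whose 4 features are all equal to 0 will be skipped by the
 * // downstream LSTM.
 * model.add(tf.layers.lstm({units: 8}));
 * ```
 *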
 * @doc {heading: 'Layers', subheading: 'Mask', namespace: 'layers'}
 */
export declare function masking(args?: MaskingArgs): Masking;
/**
 * A preprocessing layer which rescales input values to a new range.
 *
 * This layer rescales every value of an input (often an image) by multiplying
 * by `scale` and adding `offset`.
 *
 * For instance:
 *   1. To rescale an input in the `[0, 255]` range
 *      to be in the `[0, 1]` range, you would pass `scale: 1/255`.
 *   2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]`
 *      range, you would pass `scale: 1/127.5, offset: -1`.
 *
 * The rescaling is applied both during training and inference. Inputs can be
 * of integer or floating-point dtype, and by default the layer will output
 * floats.
 *
 * Arguments:
 *   - `scale`: Float, the scale to apply to the inputs.
 *   - `offset`: Float, the offset to apply to the inputs.
 *
 * Input shape:
 *   Arbitrary.
 *
 * Output shape:
 *   Same as input.
 *
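 * A minimal usage sketch:
 *
 * ```js
 * // Map [0, 255] pixel values into [-1, 1].
 * const rescale = tf.layers.rescaling({scale: 1 / 127.5, offset: -1});
 * const out = rescale.apply(tf.tensor([0, 127.5, 255]));
 * out.print();  // [-1, 0, 1].
 * ```
 *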
 * @doc {heading: 'Layers', subheading: 'Rescaling', namespace: 'layers'}
 */
export declare function rescaling(args?: RescalingArgs): Rescaling;
/**
 * A preprocessing layer which center-crops images.
 *
 * This layer crops the central portion of the images to a target size. If an
 * image is smaller than the target size, it will be resized and cropped so as
 * to return the largest possible window in the image that matches the target
 * aspect ratio.
 *
 * Input pixel values can be of any range (e.g., `[0., 1.)` or `[0, 255]`) and
 * of integer or floating-point dtype.
 *
 * If the input height/width is even and the target height/width is odd (or
 * vice versa), the input image is left-padded by 1 pixel.
 *
 * Arguments:
 *   - `height`: Integer, the height of the output shape.
 *   - `width`: Integer, the width of the output shape.
 *
 * Input shape:
 *   3D (unbatched) or 4D (batched) tensor with shape:
 *   `(..., height, width, channels)`, in `channelsLast` format.
 *
 * Output shape:
 *   3D (unbatched) or 4D (batched) tensor with shape:
 *   `(..., targetHeight, targetWidth, channels)`.
 *
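 * A minimal usage sketch (sizes are illustrative):
 *
 * ```js
 * const crop = tf.layers.centerCrop({height: 2, width: 2});
 * const image = tf.ones([1, 4, 4, 3]);  // [batch, height, width, channels].
 * const out = crop.apply(image);
 * console.log(JSON.stringify(out.shape));  // [1, 2, 2, 3].
 * ```
 *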
 * @doc {heading: 'Layers', subheading: 'CenterCrop', namespace: 'layers'}
 */
export declare function centerCrop(args?: CenterCropArgs): CenterCrop;
/**
 * A preprocessing layer which resizes images.
 *
 * This layer resizes an image input to a target height and width. The input
 * should be a 4D (batched) or 3D (unbatched) tensor in `channelsLast`
 * format. Input pixel values can be of any range (e.g., `[0., 1.)` or
 * `[0, 255]`) and of integer or floating-point dtype. By default, the layer
 * will output floats.
 *
 * Arguments:
 *   - `height`: number, the height for the output tensor.
 *   - `width`: number, the width for the output tensor.
 *   - `interpolation`: string, the method for image resizing interpolation.
 *   - `cropToAspectRatio`: boolean, whether to keep the image aspect ratio.
 *
 * Input shape:
 *   Arbitrary.
 *
 * Output shape:
 *   A tensor of the same rank as the input, with shape
 *   `(..., height, width, channels)`, where `height` and `width` are the
 *   target sizes.
 *
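 * A minimal usage sketch (sizes are illustrative):
 *
 * ```js
 * const resize = tf.layers.resizing({height: 8, width: 8});
 * const out = resize.apply(tf.ones([1, 32, 32, 3]));
 * console.log(JSON.stringify(out.shape));  // [1, 8, 8, 3].
 * ```
 *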
 * @doc {heading: 'Layers', subheading: 'Resizing', namespace: 'layers'}
 */
export declare function resizing(args?: ResizingArgs): Resizing;
/**
 * A preprocessing layer which encodes integer features.
 *
 * This layer provides options for condensing data into a categorical encoding
 * when the total number of tokens is known in advance. It accepts integer
 * values as inputs, and it outputs a dense representation of those inputs.
 *
 * Arguments:
 *
 *   - `numTokens`: The total number of tokens the layer should support. All
 *     inputs to the layer must be integers in the range `0 <= value <
 *     numTokens`, or an error will be thrown.
 *
 *   - `outputMode`: Specification for the output of the layer.
 *     Defaults to `multiHot`. Values can be `oneHot`, `multiHot` or
 *     `count`, configuring the layer as follows:
 *
 *     - `oneHot`: Encodes each individual element in the input into an
 *       array of `numTokens` size, containing a 1 at the element index. If
 *       the last dimension is size 1, will encode on that dimension. If the
 *       last dimension is not size 1, will append a new dimension for the
 *       encoded output.
 *
 *     - `multiHot`: Encodes each sample in the input into a single array
 *       of `numTokens` size, containing a 1 for each vocabulary term
 *       present in the sample. Treats the last dimension as the sample
 *       dimension: if the input shape is `(..., sampleLength)`, the output
 *       shape will be `(..., numTokens)`.
 *
 *     - `count`: Like `multiHot`, but the int array contains a count of
 *       the number of times the token at that index appeared in the sample.
 *
 *   For all output modes, only outputs up to rank 2 are currently supported.
 *
 * Call arguments:
 *   - `inputs`: A 1D or 2D tensor of integer inputs.
 *   - `countWeights`: A tensor in the same shape as `inputs` indicating the
 *     weight for each sample value when summing up in `count` mode. Not used
 *     in `multiHot` or `oneHot` modes.
 *
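 * A minimal usage sketch (token values are illustrative):
 *
 * ```js
 * const layer = tf.layers.categoryEncoding({
 *   numTokens: 4,
 *   outputMode: 'oneHot',
 * });
 * layer.apply(tf.tensor1d([3, 2, 0, 1], 'int32')).print();
 * // [[0, 0, 0, 1],
 * //  [0, 0, 1, 0],
 * //  [1, 0, 0, 0],
 * //  [0, 1, 0, 0]]
 * ```
 *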
 * @doc {heading: 'Layers', subheading: 'CategoryEncoding', namespace: 'layers'}
 */
export declare function categoryEncoding(args: CategoryEncodingArgs): CategoryEncoding;
/**
 * A preprocessing layer which randomly varies image width during training.
 *
 * This layer adjusts the width of a batch of images by a random factor.
 *
 * The input should be a 3D (unbatched) or 4D (batched) tensor in
 * the `channelsLast` image data format. Input pixel values can be of any
 * range (e.g., `[0., 1.)` or `[0, 255]`) and of integer or floating-point
 * dtype. By default, the layer will output floats. By default, this layer is
 * inactive during inference. For an overview and full list of preprocessing
 * layers, see the preprocessing
 * [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
 *
 * Arguments:
 *
 *   - `factor`:
 *     A positive float (fraction of the original width), or an Array of two
 *     floats representing the lower and upper bounds for resizing
 *     horizontally. When represented as a single float, this value is used
 *     for both the upper and lower bounds. For instance, `factor: [0.2, 0.3]`
 *     results in an output with its width changed by a random amount in the
 *     range `[20%, 30%]`. `factor: [-0.2, 0.3]` results in an output with its
 *     width changed by a random amount in the range `[-20%, +30%]`.
 *     `factor: 0.2` results in an output with its width changed by a random
 *     amount in the range `[-20%, +20%]`.
 *   - `interpolation`:
 *     String, the interpolation method.
 *     Defaults to `bilinear`.
 *     Supports `'bilinear'` and `'nearest'`.
 *     The tf methods `'bicubic'`, `'area'`, `'lanczos3'`, `'lanczos5'`,
 *     `'gaussian'`, and `'mitchellcubic'` are unimplemented in tfjs.
 *   - `seed`:
 *     Integer. Used to create a random seed.
 *
 * Input shape:
 *   3D (unbatched) or 4D (batched) tensor with shape:
 *   `(..., height, width, channels)`, in `channelsLast` format.
 *
 * Output shape:
 *   3D (unbatched) or 4D (batched) tensor with shape:
 *   `(..., height, randomizedWidth, channels)`.
 *
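 * A minimal usage sketch (the factor and shapes are illustrative, and the
 * `training: true` kwarg is assumed to be what activates the random resizing
 * outside of `fit()`):
 *
 * ```js
 * // Vary the width by a random amount in [-20%, +20%] during training.
 * const layer = tf.layers.randomWidth({factor: 0.2, seed: 42});
 * const out = layer.apply(tf.ones([1, 8, 8, 3]), {training: true});
 * console.log(JSON.stringify(out.shape));  // [1, 8, w, 3], with w near 8.
 * ```
 *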
 * @doc {heading: 'Layers', subheading: 'RandomWidth', namespace: 'layers'}
 */
export declare function randomWidth(args: RandomWidthArgs): RandomWidth;