/**
 * @license
 * Copyright 2018 Google LLC
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 * =============================================================================
 */
/// <amd-module name="@tensorflow/tfjs-layers/dist/layers/convolutional" />
import { fused, serialization, Tensor } from '@tensorflow/tfjs-core';
import { Activation } from '../activations';
import { Constraint, ConstraintIdentifier } from '../constraints';
import { Layer, LayerArgs } from '../engine/topology';
import { Initializer, InitializerIdentifier } from '../initializers';
import { ActivationIdentifier } from '../keras_format/activation_config';
import { DataFormat, InterpolationFormat, PaddingMode, Shape } from '../keras_format/common';
import { Regularizer, RegularizerIdentifier } from '../regularizers';
import { Kwargs } from '../types';
import { LayerVariable } from '../variables';
/**
 * Transpose and cast the input before the conv2d.
 * @param x Input image tensor.
 * @param dataFormat Data format.
 */
export declare function preprocessConv2DInput(x: Tensor, dataFormat: DataFormat): Tensor;
/**
 * Transpose and cast the input before the conv3d.
 * @param x Input image tensor.
 * @param dataFormat Data format.
 */
export declare function preprocessConv3DInput(x: Tensor, dataFormat: DataFormat): Tensor;
/**
 * 1D-convolution with bias added.
 *
 * Porting Note: This function does not exist in the Python Keras backend.
 * It is exactly the same as `conv1d`, except for the added `bias`.
 *
 * @param x Input tensor, rank-3, of shape `[batchSize, width, inChannels]`.
 * @param kernel Kernel, rank-3, of shape `[filterWidth, inDepth, outDepth]`.
 * @param bias Bias, rank-1, of shape `[outDepth]`.
 * @param strides Stride of the convolution.
 * @param padding Padding mode.
 * @param dataFormat Data format.
 * @param dilationRate Dilation rate.
 * @returns The result of the 1D convolution.
 * @throws ValueError, if `x`, `kernel` or `bias` is not of the correct rank.
 */
export declare function conv1dWithBias(x: Tensor, kernel: Tensor, bias: Tensor, strides?: number, padding?: string, dataFormat?: DataFormat, dilationRate?: number): Tensor;
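/*
 * Usage sketch (not part of the declarations): calling the backend helper
 * directly. The deep import path and the literal argument values below are
 * assumptions for illustration; application code usually goes through the
 * `tf.layers.conv1d` layer factory instead.
 *
 *   import * as tf from '@tensorflow/tfjs';
 *   import { conv1dWithBias } from '@tensorflow/tfjs-layers/dist/layers/convolutional';
 *
 *   const x = tf.ones([2, 10, 3]);      // [batchSize, width, inChannels]
 *   const kernel = tf.ones([5, 3, 8]);  // [filterWidth, inDepth, outDepth]
 *   const bias = tf.zeros([8]);         // [outDepth]
 *   const y = conv1dWithBias(x, kernel, bias, 1, 'same');
 *   console.log(y.shape);               // [2, 10, 8] with 'same' padding
 */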
/**
 * 1D-convolution.
 *
 * @param x Input tensor, rank-3, of shape `[batchSize, width, inChannels]`.
 * @param kernel Kernel, rank-3, of shape `[filterWidth, inDepth, outDepth]`.
 * @param strides Stride of the convolution.
 * @param padding Padding mode.
 * @param dataFormat Data format.
 * @param dilationRate Dilation rate.
 * @returns The result of the 1D convolution.
 * @throws ValueError, if `x` or `kernel` is not of the correct rank.
 */
export declare function conv1d(x: Tensor, kernel: Tensor, strides?: number, padding?: string, dataFormat?: DataFormat, dilationRate?: number): Tensor;
/**
 * 2D Convolution.
 * @param x Input tensor.
 * @param kernel kernel of the convolution.
 * @param strides strides array.
 * @param padding padding mode. Defaults to 'valid'.
 * @param dataFormat data format. Defaults to 'channelsLast'.
 * @param dilationRate dilation rate array.
 * @returns Result of the 2D convolution.
 */
export declare function conv2d(x: Tensor, kernel: Tensor, strides?: number[], padding?: string, dataFormat?: DataFormat, dilationRate?: [number, number]): Tensor;
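/*
 * Usage sketch (the deep import path is an assumption; it must resolve in
 * your build for this to run):
 *
 *   import * as tf from '@tensorflow/tfjs';
 *   import { conv2d } from '@tensorflow/tfjs-layers/dist/layers/convolutional';
 *
 *   const image = tf.ones([1, 28, 28, 3]);   // [batch, height, width, channels]
 *   const kernel = tf.ones([3, 3, 3, 16]);   // [kh, kw, inChannels, outChannels]
 *   const out = conv2d(image, kernel, [2, 2], 'same', 'channelsLast', [1, 1]);
 *   console.log(out.shape);                  // [1, 14, 14, 16]
 */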
/**
 * 2D Convolution with an added bias and optional activation.
 * Note: This function does not exist in the Python Keras backend. It is
 * exactly the same as `conv2d`, except for the added `bias` and the optional
 * fused `activation`.
 */
export declare function conv2dWithBiasActivation(x: Tensor, kernel: Tensor, bias: Tensor, strides?: number[], padding?: string, dataFormat?: DataFormat, dilationRate?: [number, number], activation?: fused.Activation): Tensor;
/**
 * 3D Convolution.
 * @param x Input tensor.
 * @param kernel kernel of the convolution.
 * @param strides strides array.
 * @param padding padding mode. Defaults to 'valid'.
 * @param dataFormat data format. Defaults to 'channelsLast'.
 * @param dilationRate dilation rate array.
 * @returns Result of the 3D convolution.
 */
export declare function conv3d(x: Tensor, kernel: Tensor, strides?: number[], padding?: string, dataFormat?: DataFormat, dilationRate?: [number, number, number]): Tensor;
/**
 * 3D Convolution with an added bias.
 * Note: This function does not exist in the Python Keras backend. It is
 * exactly the same as `conv3d`, except for the added `bias`.
 */
export declare function conv3dWithBias(x: Tensor, kernel: Tensor, bias: Tensor, strides?: number[], padding?: string, dataFormat?: DataFormat, dilationRate?: [number, number, number]): Tensor;
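/*
 * The 3D variants follow the same calling pattern as `conv2d`, with rank-5
 * inputs. A minimal sketch (the deep import path is an assumption):
 *
 *   import * as tf from '@tensorflow/tfjs';
 *   import { conv3d } from '@tensorflow/tfjs-layers/dist/layers/convolutional';
 *
 *   const volume = tf.ones([1, 16, 16, 16, 1]);  // [batch, d, h, w, channels]
 *   const kernel = tf.ones([3, 3, 3, 1, 8]);     // [kd, kh, kw, inCh, outCh]
 *   const out = conv3d(volume, kernel, [1, 1, 1], 'same');
 *   console.log(out.shape);                      // [1, 16, 16, 16, 8]
 */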
/**
 * Base LayerConfig for depthwise and non-depthwise convolutional layers.
 */
export declare interface BaseConvLayerArgs extends LayerArgs {
    /**
     * The dimensions of the convolution window. If kernelSize is a number, the
     * convolutional window will be square.
     */
    kernelSize: number | number[];
    /**
     * The strides of the convolution in each dimension. If strides is a number,
     * strides in both dimensions are equal.
     *
     * Specifying any stride value != 1 is incompatible with specifying any
     * `dilationRate` value != 1.
     */
    strides?: number | number[];
    /**
     * Padding mode.
     */
    padding?: PaddingMode;
    /**
     * Format of the data, which determines the ordering of the dimensions in
     * the inputs.
     *
     * `channels_last` corresponds to inputs with shape
     * `(batch, ..., channels)`
     *
     * `channels_first` corresponds to inputs with shape `(batch, channels,
     * ...)`.
     *
     * Defaults to `channels_last`.
     */
    dataFormat?: DataFormat;
    /**
     * The dilation rate to use for the dilated convolution in each dimension.
     * Should be an integer or array of two or three integers.
     *
     * Currently, specifying any `dilationRate` value != 1 is incompatible with
     * specifying any `strides` value != 1.
     */
    dilationRate?: number | [number] | [number, number] | [number, number, number];
    /**
     * Activation function of the layer.
     *
     * If you don't specify the activation, none is applied.
     */
    activation?: ActivationIdentifier;
    /**
     * Whether the layer uses a bias vector. Defaults to `true`.
     */
    useBias?: boolean;
    /**
     * Initializer for the convolutional kernel weights matrix.
     */
    kernelInitializer?: InitializerIdentifier | Initializer;
    /**
     * Initializer for the bias vector.
     */
    biasInitializer?: InitializerIdentifier | Initializer;
    /**
     * Constraint for the convolutional kernel weights.
     */
    kernelConstraint?: ConstraintIdentifier | Constraint;
    /**
     * Constraint for the bias vector.
     */
    biasConstraint?: ConstraintIdentifier | Constraint;
    /**
     * Regularizer function applied to the kernel weights matrix.
     */
    kernelRegularizer?: RegularizerIdentifier | Regularizer;
    /**
     * Regularizer function applied to the bias vector.
     */
    biasRegularizer?: RegularizerIdentifier | Regularizer;
    /**
     * Regularizer function applied to the activation.
     */
    activityRegularizer?: RegularizerIdentifier | Regularizer;
}
/**
 * LayerConfig for non-depthwise convolutional layers.
 * Applies to non-depthwise convolution of all ranks (e.g., Conv1D, Conv2D,
 * Conv3D).
 */
export declare interface ConvLayerArgs extends BaseConvLayerArgs {
    /**
     * The dimensionality of the output space (i.e. the number of filters in the
     * convolution).
     */
    filters: number;
}
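/*
 * Example of a config object satisfying `ConvLayerArgs` (values are
 * illustrative, not defaults):
 *
 *   const convArgs: ConvLayerArgs = {
 *     filters: 32,
 *     kernelSize: [3, 3],
 *     strides: 1,
 *     padding: 'same',
 *     dataFormat: 'channelsLast',
 *     activation: 'relu',
 *     useBias: true,
 *     kernelInitializer: 'glorotUniform',
 *     biasInitializer: 'zeros',
 *   };
 */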
/**
 * Abstract convolution layer.
 */
export declare abstract class BaseConv extends Layer {
    protected readonly rank: number;
    protected readonly kernelSize: number[];
    protected readonly strides: number[];
    protected readonly padding: PaddingMode;
    protected readonly dataFormat: DataFormat;
    protected readonly activation: Activation;
    protected readonly useBias: boolean;
    protected readonly dilationRate: number[];
    protected readonly biasInitializer?: Initializer;
    protected readonly biasConstraint?: Constraint;
    protected readonly biasRegularizer?: Regularizer;
    protected bias: LayerVariable;
    readonly DEFAULT_KERNEL_INITIALIZER: InitializerIdentifier;
    readonly DEFAULT_BIAS_INITIALIZER: InitializerIdentifier;
    constructor(rank: number, args: BaseConvLayerArgs);
    protected static verifyArgs(args: BaseConvLayerArgs): void;
    getConfig(): serialization.ConfigDict;
}
/**
 * Abstract nD convolution layer. Ancestor of convolution layers which reduce
 * across channels, i.e., Conv1D and Conv2D, but not DepthwiseConv2D.
 */
export declare abstract class Conv extends BaseConv {
    protected readonly filters: number;
    protected kernel: LayerVariable;
    protected readonly kernelInitializer?: Initializer;
    protected readonly kernelConstraint?: Constraint;
    protected readonly kernelRegularizer?: Regularizer;
    constructor(rank: number, args: ConvLayerArgs);
    build(inputShape: Shape | Shape[]): void;
    call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
    computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[];
    getConfig(): serialization.ConfigDict;
    protected static verifyArgs(args: ConvLayerArgs): void;
}
export declare class Conv2D extends Conv {
    /** @nocollapse */
    static className: string;
    constructor(args: ConvLayerArgs);
    getConfig(): serialization.ConfigDict;
    protected static verifyArgs(args: ConvLayerArgs): void;
}
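/*
 * A `Conv2D` layer is normally created through the layer factory that wraps
 * this class (assumed here to be `tf.layers.conv2d`, which accepts the same
 * `ConvLayerArgs`):
 *
 *   import * as tf from '@tensorflow/tfjs';
 *
 *   const layer = tf.layers.conv2d({filters: 8, kernelSize: 3, activation: 'relu'});
 *   const y = layer.apply(tf.ones([1, 28, 28, 1])) as tf.Tensor;
 *   console.log(y.shape);  // [1, 26, 26, 8] with the default 'valid' padding
 */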
export declare class Conv3D extends Conv {
    /** @nocollapse */
    static className: string;
    constructor(args: ConvLayerArgs);
    getConfig(): serialization.ConfigDict;
    protected static verifyArgs(args: ConvLayerArgs): void;
}
export declare class Conv2DTranspose extends Conv2D {
    /** @nocollapse */
    static className: string;
    constructor(args: ConvLayerArgs);
    build(inputShape: Shape | Shape[]): void;
    call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
    computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[];
    getConfig(): serialization.ConfigDict;
}
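/*
 * `Conv2DTranspose` upsamples spatially. A rough sketch using the assumed
 * factory `tf.layers.conv2dTranspose` (same `ConvLayerArgs`):
 *
 *   import * as tf from '@tensorflow/tfjs';
 *
 *   const deconv = tf.layers.conv2dTranspose({
 *     filters: 16,
 *     kernelSize: 3,
 *     strides: 2,
 *     padding: 'same',
 *   });
 *   const y = deconv.apply(tf.ones([1, 7, 7, 32])) as tf.Tensor;
 *   console.log(y.shape);  // [1, 14, 14, 16]
 */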
export declare class Conv3DTranspose extends Conv3D {
    /** @nocollapse */
    static className: string;
    constructor(args: ConvLayerArgs);
    build(inputShape: Shape | Shape[]): void;
    call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
    computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[];
    getConfig(): serialization.ConfigDict;
}
export declare interface SeparableConvLayerArgs extends ConvLayerArgs {
    /**
     * The number of depthwise convolution output channels for each input
     * channel.
     * The total number of depthwise convolution output channels will be equal
     * to `filtersIn * depthMultiplier`. Default: 1.
     */
    depthMultiplier?: number;
    /**
     * Initializer for the depthwise kernel matrix.
     */
    depthwiseInitializer?: InitializerIdentifier | Initializer;
    /**
     * Initializer for the pointwise kernel matrix.
     */
    pointwiseInitializer?: InitializerIdentifier | Initializer;
    /**
     * Regularizer function applied to the depthwise kernel matrix.
     */
    depthwiseRegularizer?: RegularizerIdentifier | Regularizer;
    /**
     * Regularizer function applied to the pointwise kernel matrix.
     */
    pointwiseRegularizer?: RegularizerIdentifier | Regularizer;
    /**
     * Constraint function applied to the depthwise kernel matrix.
     */
    depthwiseConstraint?: ConstraintIdentifier | Constraint;
    /**
     * Constraint function applied to the pointwise kernel matrix.
     */
    pointwiseConstraint?: ConstraintIdentifier | Constraint;
}
export declare class SeparableConv extends Conv {
    /** @nocollapse */
    static className: string;
    readonly depthMultiplier: number;
    protected readonly depthwiseInitializer?: Initializer;
    protected readonly depthwiseRegularizer?: Regularizer;
    protected readonly depthwiseConstraint?: Constraint;
    protected readonly pointwiseInitializer?: Initializer;
    protected readonly pointwiseRegularizer?: Regularizer;
    protected readonly pointwiseConstraint?: Constraint;
    readonly DEFAULT_DEPTHWISE_INITIALIZER: InitializerIdentifier;
    readonly DEFAULT_POINTWISE_INITIALIZER: InitializerIdentifier;
    protected depthwiseKernel: LayerVariable;
    protected pointwiseKernel: LayerVariable;
    constructor(rank: number, config?: SeparableConvLayerArgs);
    build(inputShape: Shape | Shape[]): void;
    call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
    getConfig(): serialization.ConfigDict;
}
export declare class SeparableConv2D extends SeparableConv {
    /** @nocollapse */
    static className: string;
    constructor(args?: SeparableConvLayerArgs);
}
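/*
 * `SeparableConv2D` factors the convolution into a depthwise step followed by
 * a 1x1 pointwise step. Sketch via the assumed factory
 * `tf.layers.separableConv2d` (takes `SeparableConvLayerArgs`):
 *
 *   import * as tf from '@tensorflow/tfjs';
 *
 *   const sep = tf.layers.separableConv2d({
 *     filters: 32,
 *     kernelSize: 3,
 *     depthMultiplier: 1,
 *     padding: 'same',
 *   });
 *   const y = sep.apply(tf.ones([1, 32, 32, 8])) as tf.Tensor;
 *   console.log(y.shape);  // [1, 32, 32, 32]
 */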
export declare class Conv1D extends Conv {
    /** @nocollapse */
    static className: string;
    constructor(args: ConvLayerArgs);
    getConfig(): serialization.ConfigDict;
    protected static verifyArgs(args: ConvLayerArgs): void;
}
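/*
 * `Conv1D` operates on rank-3 sequence inputs `[batch, steps, channels]`.
 * Sketch via the assumed factory `tf.layers.conv1d`:
 *
 *   import * as tf from '@tensorflow/tfjs';
 *
 *   const conv = tf.layers.conv1d({filters: 4, kernelSize: 5, padding: 'causal'});
 *   const y = conv.apply(tf.ones([2, 100, 1])) as tf.Tensor;
 *   console.log(y.shape);  // [2, 100, 4] ('causal' keeps the step count)
 */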
export declare interface Cropping2DLayerArgs extends LayerArgs {
    /**
     * Dimension of the cropping along the width and the height.
     * - If integer: the same symmetric cropping
     *   is applied to width and height.
     * - If list of 2 integers:
     *   interpreted as two different
     *   symmetric cropping values for height and width:
     *   `[symmetric_height_crop, symmetric_width_crop]`.
     * - If a list of 2 lists of 2 integers:
     *   interpreted as
     *   `[[top_crop, bottom_crop], [left_crop, right_crop]]`
     */
    cropping: number | [number, number] | [[number, number], [number, number]];
    /**
     * Format of the data, which determines the ordering of the dimensions in
     * the inputs.
     *
     * `channels_last` corresponds to inputs with shape
     * `(batch, ..., channels)`
     *
     * `channels_first` corresponds to inputs with shape
     * `(batch, channels, ...)`
     *
     * Defaults to `channels_last`.
     */
    dataFormat?: DataFormat;
}
export declare class Cropping2D extends Layer {
    /** @nocollapse */
    static className: string;
    protected readonly cropping: [[number, number], [number, number]];
    protected readonly dataFormat: DataFormat;
    constructor(args: Cropping2DLayerArgs);
    computeOutputShape(inputShape: Shape): Shape;
    call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
    getConfig(): serialization.ConfigDict;
}
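/*
 * `Cropping2D` removes rows/columns from the spatial dimensions. Sketch via
 * the assumed factory `tf.layers.cropping2D` (takes `Cropping2DLayerArgs`):
 *
 *   import * as tf from '@tensorflow/tfjs';
 *
 *   // Crop 1 row top/bottom and 2 columns left/right.
 *   const crop = tf.layers.cropping2D({cropping: [[1, 1], [2, 2]]});
 *   const y = crop.apply(tf.ones([1, 28, 28, 3])) as tf.Tensor;
 *   console.log(y.shape);  // [1, 26, 24, 3]
 */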
export declare interface UpSampling2DLayerArgs extends LayerArgs {
    /**
     * The upsampling factors for rows and columns.
     *
     * Defaults to `[2, 2]`.
     */
    size?: number[];
    /**
     * Format of the data, which determines the ordering of the dimensions in
     * the inputs.
     *
     * `"channelsLast"` corresponds to inputs with shape
     * `[batch, ..., channels]`
     *
     * `"channelsFirst"` corresponds to inputs with shape `[batch, channels,
     * ...]`.
     *
     * Defaults to `"channelsLast"`.
     */
    dataFormat?: DataFormat;
    /**
     * The interpolation mechanism, one of `"nearest"` or `"bilinear"`.
     * Defaults to `"nearest"`.
     */
    interpolation?: InterpolationFormat;
}
export declare class UpSampling2D extends Layer {
    /** @nocollapse */
    static className: string;
    protected readonly DEFAULT_SIZE: number[];
    protected readonly size: number[];
    protected readonly dataFormat: DataFormat;
    protected readonly interpolation: InterpolationFormat;
    constructor(args: UpSampling2DLayerArgs);
    computeOutputShape(inputShape: Shape): Shape;
    call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
    getConfig(): serialization.ConfigDict;
}
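/*
 * `UpSampling2D` repeats rows and columns (or interpolates bilinearly).
 * Sketch via the assumed factory `tf.layers.upSampling2d`:
 *
 *   import * as tf from '@tensorflow/tfjs';
 *
 *   const up = tf.layers.upSampling2d({size: [2, 2], interpolation: 'nearest'});
 *   const y = up.apply(tf.ones([1, 8, 8, 3])) as tf.Tensor;
 *   console.log(y.shape);  // [1, 16, 16, 3]
 */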