import * as tf from '@tensorflow/tfjs';
import { backend_util, BackendTimingInfo, DataId, DataType, KernelBackend, Rank, Scalar, ShapeMap, Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, Tensor5D, TensorInfo } from '@tensorflow/tfjs';
import { TFEOpAttr, TFJSBinding } from './tfjs_binding';
export declare class NodeJSKernelBackend extends KernelBackend {
    binding: TFJSBinding;
    isGPUPackage: boolean;
    isUsingGpuDevice: boolean;
    private tensorMap;
    constructor(binding: TFJSBinding, packageName: string);
    private getDTypeInteger;
    private typeAttributeFromTensor;
    private createOutputTensor;
    private getInputTensorIds;
    private createReductionOpAttrs;
    private executeSingleInput;
    floatPrecision(): 16 | 32;
    epsilon(): number;
    /**
     * Executes a TensorFlow Eager Op that provides one output Tensor.
     * @param name The name of the Op to execute.
     * @param opAttrs The list of Op attributes required to execute.
     * @param inputs The list of input Tensors for the Op.
     * @return A resulting Tensor from Op execution.
     */
    executeSingleOutput(name: string, opAttrs: TFEOpAttr[], inputs: TensorInfo[]): Tensor;
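    /*
     * Usage sketch (illustrative, not part of the declarations): running a
     * single-output eager op through the backend. The op name 'Square' and the
     * single 'T' type attribute are assumptions chosen for illustration; the
     * call shape follows the executeSingleOutput signature above, with the
     * attribute built by createTensorsTypeOpAttr exported from this module.
     *
     *   const x = tf.scalar(3);
     *   const opAttrs = [createTensorsTypeOpAttr('T', x)];
     *   const y = nodeBackend().executeSingleOutput('Square', opAttrs, [x]);
     */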
|
    /**
     * Executes a TensorFlow Eager Op that provides multiple output Tensors.
     * @param name The name of the Op to execute.
     * @param opAttrs The list of Op attributes required to execute.
     * @param inputs The list of input Tensors for the Op.
     * @param numOutputs The number of output Tensors for Op execution.
     * @return A resulting Tensor array from Op execution.
     */
    executeMultipleOutputs(name: string, opAttrs: TFEOpAttr[], inputs: Tensor[], numOutputs: number): Tensor[];
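    /*
     * Usage sketch (illustrative): multi-output ops return an array of Tensors
     * whose length must be passed explicitly. 'Unique' is used as an assumed
     * example of a two-output op; real ops typically need further attributes
     * (for 'Unique', an 'out_idx' dtype attribute) beyond the 'T' shown here.
     *
     *   const x = tf.tensor1d([1, 1, 2, 3, 3]);
     *   const opAttrs = [createTensorsTypeOpAttr('T', x)];
     *   const [values, indices] =
     *       nodeBackend().executeMultipleOutputs('Unique', opAttrs, [x], 2);
     */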
|
    numDataIds(): number;
    dispose(): void;
    read(dataId: DataId): Promise<backend_util.BackendValues>;
    readSync(dataId: DataId): backend_util.BackendValues;
    disposeData(dataId: DataId): void;
    move(dataId: DataId, values: backend_util.BackendValues, shape: number[], dtype: DataType): void;
    write(values: backend_util.BackendValues, shape: number[], dtype: DataType): DataId;
    fill<R extends Rank>(shape: ShapeMap[R], value: number | string, dtype?: DataType): Tensor<R>;
    onesLike<R extends Rank>(x: Tensor<R>): Tensor<R>;
    zerosLike<R extends Rank>(x: Tensor<R>): Tensor<R>;
    stridedSlice<T extends Tensor>(x: T, begin: number[], end: number[], strides: number[]): T;
    unstack(x: Tensor, axis: number): Tensor[];
    batchMatMul(a: Tensor<Rank.R3>, b: Tensor<Rank.R3>, transposeA: boolean, transposeB: boolean): Tensor<Rank.R3>;
    private applyActivation;
    fusedConv2d({ input, filter, convInfo, bias, activation, preluActivationWeights }: backend_util.FusedConv2DConfig): Tensor4D;
    fusedBatchMatMul({ a, b, transposeA, transposeB, bias, activation, preluActivationWeights }: backend_util.FusedBatchMatMulConfig): Tensor3D;
    slice<T extends Tensor>(x: T, begin: number[], size: number[]): T;
    reverse<T extends Tensor>(a: T, axis: number[]): T;
    concat(tensors: Tensor[], axis: number): Tensor;
    neg<T extends Tensor>(a: T): T;
    diag(x: Tensor): Tensor;
    add(a: Tensor, b: Tensor): Tensor;
    select(condition: Tensor, a: Tensor, b: Tensor): Tensor;
    addN<T extends Tensor>(tensors: T[]): T;
    subtract(a: Tensor, b: Tensor): Tensor;
    multiply(a: Tensor, b: Tensor): Tensor;
    realDivide(a: Tensor, b: Tensor): Tensor;
    floorDiv(a: Tensor, b: Tensor): Tensor;
    divide(a: Tensor, b: Tensor): Tensor;
    divNoNan(a: Tensor, b: Tensor): Tensor;
    unsortedSegmentSum<T extends Tensor>(x: T, segmentIds: Tensor1D, numSegments: number): Tensor;
    sum(x: Tensor, axes: number[]): Tensor;
    prod(x: Tensor, axes: number[]): Tensor;
    argMin(x: Tensor, axis: number): Tensor;
    argMax(x: Tensor, axis: number): Tensor;
    equal(a: Tensor, b: Tensor): Tensor;
    notEqual(a: Tensor, b: Tensor): Tensor;
    less(a: Tensor, b: Tensor): Tensor;
    lessEqual(a: Tensor, b: Tensor): Tensor;
    greater(a: Tensor, b: Tensor): Tensor;
    greaterEqual(a: Tensor, b: Tensor): Tensor;
    logicalNot<T extends Tensor>(a: T): T;
    logicalAnd(a: Tensor, b: Tensor): Tensor;
    logicalOr(a: Tensor, b: Tensor): Tensor;
    where(condition: Tensor): Tensor2D;
    topKValues<T extends Tensor>(x: T, k: number): Tensor1D;
    topKIndices(x: Tensor, k: number): Tensor1D;
    topk<T extends Tensor>(x: T, k?: number, sorted?: boolean): [T, T];
    min(x: Tensor, axes: number[]): Tensor;
    minimum(a: Tensor, b: Tensor): Tensor;
    max(x: Tensor, axes: number[]): Tensor;
    maximum(a: Tensor, b: Tensor): Tensor;
    all(x: Tensor, axes: number[]): Tensor;
    any(x: Tensor, axes: number[]): Tensor;
    ceil<T extends Tensor>(x: T): T;
    floor<T extends Tensor>(x: T): T;
    pow<T extends Tensor>(a: T, b: Tensor): T;
    exp<T extends Tensor>(x: T): T;
    log<T extends Tensor>(x: T): T;
    log1p<T extends Tensor>(x: T): T;
    sqrt<T extends Tensor>(x: T): T;
    square<T extends Tensor>(x: T): T;
    relu<T extends Tensor>(x: T): T;
    relu6<T extends Tensor>(x: T): T;
    prelu<T extends Tensor>(x: T, a: T): T;
    elu<T extends Tensor>(x: T): T;
    eluDer<T extends Tensor>(dy: T, y: T): T;
    selu<T extends Tensor>(x: T): T;
    int<T extends Tensor>(x: T): T;
    clip<T extends Tensor>(x: T, min: number, max: number): T;
    abs<T extends Tensor>(x: T): T;
    complexAbs<T extends Tensor>(x: T): T;
    sigmoid<T extends Tensor>(x: T): T;
    sin<T extends Tensor>(x: T): T;
    cos<T extends Tensor>(x: T): T;
    tan<T extends Tensor>(x: T): T;
    asin<T extends Tensor>(x: T): T;
    acos<T extends Tensor>(x: T): T;
    atan<T extends Tensor>(x: T): T;
    sinh<T extends Tensor>(x: T): T;
    cosh<T extends Tensor>(x: T): T;
    tanh<T extends Tensor>(x: T): T;
    mod(a: Tensor, b: Tensor): Tensor;
    round<T extends Tensor>(x: T): T;
    sign<T extends Tensor>(x: T): T;
    isNaN<T extends Tensor>(x: T): T;
    isInf<T extends Tensor>(x: T): T;
    isFinite<T extends Tensor>(x: T): T;
    rsqrt<T extends Tensor>(x: T): T;
    reciprocal<T extends Tensor>(x: T): T;
    asinh<T extends Tensor>(x: T): T;
    acosh<T extends Tensor>(x: T): T;
    atanh<T extends Tensor>(x: T): T;
    erf<T extends Tensor>(x: T): T;
    squaredDifference(a: Tensor, b: Tensor): Tensor;
    expm1<T extends Tensor>(x: T): T;
    softplus<T extends Tensor>(x: T): T;
    atan2<T extends Tensor>(a: T, b: T): T;
    step<T extends Tensor>(x: T, alpha: number): T;
    conv2d(x: Tensor4D, filter: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    conv2dDerInput(dy: Tensor4D, filter: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    conv2dDerFilter(x: Tensor4D, dy: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    depthwiseConv2DDerInput(dy: Tensor4D, filter: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    depthwiseConv2DDerFilter(x: Tensor4D, dY: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    fusedDepthwiseConv2D({ input, filter, convInfo, bias, activation, preluActivationWeights }: backend_util.FusedConv2DConfig): Tensor4D;
    depthwiseConv2D(input: Tensor4D, filter: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    conv3d(x: Tensor<Rank.R5>, filter: Tensor<Rank.R5>, convInfo: backend_util.Conv3DInfo): Tensor<Rank.R5>;
    conv3dDerInput(dy: Tensor<Rank.R5>, filter: Tensor<Rank.R5>, convInfo: backend_util.Conv3DInfo): Tensor<Rank.R5>;
    conv3dDerFilter(x: Tensor<Rank.R5>, dY: Tensor<Rank.R5>, convInfo: backend_util.Conv3DInfo): Tensor<Rank.R5>;
    maxPool(x: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    maxPoolBackprop(dy: Tensor4D, x: Tensor4D, y: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    avgPool(x: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    avgPoolBackprop(dy: Tensor4D, x: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    avgPool3d(x: Tensor5D, convInfo: backend_util.Conv3DInfo): Tensor5D;
    avgPool3dBackprop(dy: Tensor5D, x: Tensor5D, convInfo: backend_util.Conv3DInfo): Tensor5D;
    maxPool3d(x: Tensor5D, convInfo: backend_util.Conv3DInfo): Tensor5D;
    maxPool3dBackprop(dy: Tensor5D, x: Tensor5D, y: Tensor5D, convInfo: backend_util.Conv3DInfo): Tensor5D;
    reshape<T extends Tensor, R extends Rank>(x: T, shape: ShapeMap[R]): Tensor<R>;
    cast<T extends Tensor>(x: T, dtype: DataType): T;
    tile<T extends Tensor>(x: T, reps: number[]): T;
    pad<T extends Tensor>(x: T, paddings: Array<[number, number]>, constantValue: number): T;
    transpose<T extends Tensor>(x: T, perm: number[]): T;
    gather<T extends Tensor>(x: T, indices: Tensor1D, axis: number): T;
    gatherND(x: Tensor, indices: Tensor): Tensor;
    scatterND<R extends Rank>(indices: Tensor, updates: Tensor, shape: ShapeMap[R]): Tensor<R>;
    batchToSpaceND<T extends Tensor>(x: T, blockShape: number[], crops: number[][]): T;
    spaceToBatchND<T extends Tensor>(x: T, blockShape: number[], paddings: number[][]): T;
    resizeBilinear(x: Tensor4D, newHeight: number, newWidth: number, alignCorners: boolean): Tensor4D;
    resizeBilinearBackprop(dy: Tensor4D, x: Tensor4D, alignCorners: boolean): Tensor4D;
    resizeNearestNeighbor(x: Tensor4D, newHeight: number, newWidth: number, alignCorners: boolean): Tensor4D;
    resizeNearestNeighborBackprop(dy: Tensor4D, x: Tensor4D, alignCorners: boolean): Tensor4D;
    batchNorm(x: Tensor4D, mean: Tensor4D | Tensor1D, variance: Tensor4D | Tensor1D, offset?: Tensor4D | Tensor1D, scale?: Tensor4D | Tensor1D, varianceEpsilon?: number): Tensor4D;
    localResponseNormalization4D(x: Tensor4D, radius: number, bias: number, alpha: number, beta: number): Tensor4D;
    LRNGrad(dy: Tensor4D, inputImage: Tensor4D, outputImage: Tensor4D, radius: number, bias: number, alpha: number, beta: number): Tensor4D;
    multinomial(logits: Tensor2D, normalized: boolean, numSamples: number, seed: number): Tensor2D;
    oneHot(indices: Tensor1D, depth: number, onValue: number, offValue: number): Tensor2D;
    cumsum(x: Tensor, axis: number, exclusive: boolean, reverse: boolean): Tensor;
    nonMaxSuppression(boxes: Tensor2D, scores: Tensor1D, maxOutputSize: number, iouThreshold?: number, scoreThreshold?: number): Tensor1D;
    fft(x: Tensor<Rank.R2>): Tensor<Rank.R2>;
    ifft(x: Tensor2D): Tensor2D;
    complex<T extends Tensor>(real: T, imag: T): T;
    real<T extends Tensor>(input: T): T;
    imag<T extends Tensor>(input: T): T;
    cropAndResize(image: Tensor<Rank.R4>, boxes: Tensor<Rank.R2>, boxIndex: Tensor<Rank.R1>, cropSize: [number, number], method: 'bilinear' | 'nearest', extrapolationValue: number): Tensor<Rank.R4>;
    depthToSpace(x: Tensor<Rank.R4>, blockSize: number, dataFormat: string): Tensor<Rank.R4>;
    split<T extends Tensor>(value: T, sizeSplits: number[], axis: number): T[];
    sparseToDense<R extends Rank>(sparseIndices: Tensor, sparseValues: Tensor, outputShape: ShapeMap[R], defaultValue: Tensor<Rank.R0>): Tensor<R>;
    linspace(start: number, stop: number, num: number): Tensor1D;
    decodeJpeg(contents: Uint8Array, channels: number, ratio: number, fancyUpscaling: boolean, tryRecoverTruncated: boolean, acceptableFraction: number, dctMethod: string): Tensor3D;
    decodePng(contents: Uint8Array, channels: number): Tensor3D;
    decodeBmp(contents: Uint8Array, channels: number): Tensor3D;
    decodeGif(contents: Uint8Array): Tensor4D;
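    /*
     * Usage sketch (illustrative): decoding a JPEG file into a Tensor3D through
     * the backend. The file path is hypothetical and the trailing argument
     * values (ratio 1, fancy upscaling on, no truncation recovery, acceptable
     * fraction 1, default DCT method) are assumptions mirroring common
     * TensorFlow defaults; application code normally uses the higher-level
     * tf.node.decodeJpeg wrapper instead.
     *
     *   import { readFileSync } from 'fs';
     *   const bytes = new Uint8Array(readFileSync('input.jpg'));
     *   const image = nodeBackend().decodeJpeg(bytes, 3, 1, true, false, 1, '');
     */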
|
    executeEncodeImageOp(name: string, opAttrs: TFEOpAttr[], imageData: Uint8Array, imageShape: number[]): Tensor;
    encodeJpeg(imageData: Uint8Array, imageShape: number[], format: '' | 'grayscale' | 'rgb', quality: number, progressive: boolean, optimizeSize: boolean, chromaDownsampling: boolean, densityUnit: 'in' | 'cm', xDensity: number, yDensity: number, xmpMetadata: string): Tensor;
    encodePng(imageData: Uint8Array, imageShape: number[], compression: number): Tensor;
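    /*
     * Usage sketch (illustrative): encoding raw HWC uint8 pixel data as a JPEG.
     * The concrete values (quality 100, no progressive encoding, size
     * optimization on, no chroma downsampling, 72 dpi in inches, empty XMP
     * metadata) are assumptions for illustration, not documented defaults.
     *
     *   const pixels = new Uint8Array(2 * 2 * 3);  // 2x2 RGB image, all black
     *   const jpeg = nodeBackend().encodeJpeg(
     *       pixels, [2, 2, 3], 'rgb', 100, false, true, false, 'in', 72, 72, '');
     */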
|
    deleteSavedModel(id: number): void;
    loadSavedModelMetaGraph(path: string, tags: string): number;
    runSavedModel(id: number, inputs: Tensor[], inputOpNames: string[], outputOpNames: string[]): Tensor[];
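    /*
     * Usage sketch (illustrative): the SavedModel lifecycle exposed by this
     * backend. The path, tag, and op names are hypothetical placeholders; the
     * higher-level tf.node.loadSavedModel API is the usual entry point rather
     * than calling these methods directly.
     *
     *   const backend = nodeBackend();
     *   const id = backend.loadSavedModelMetaGraph('/tmp/my_model', 'serve');
     *   const outputs = backend.runSavedModel(
     *       id, [tf.zeros([1, 4])], ['serving_default_x:0'],
     *       ['StatefulPartitionedCall:0']);
     *   backend.deleteSavedModel(id);
     */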
|
    summaryWriter(logdir: string): Tensor1D;
    createSummaryFileWriter(resourceHandle: Tensor, logdir: string, maxQueue?: number, flushMillis?: number, filenameSuffix?: string): void;
    writeScalarSummary(resourceHandle: Tensor, step: number, name: string, value: Scalar | number): void;
    flushSummaryWriter(resourceHandle: Tensor): void;
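    /*
     * Usage sketch (illustrative): writing a scalar to TensorBoard with the
     * summary-writer methods above. The log directory is hypothetical, and
     * tf.node.summaryFileWriter is the usual high-level wrapper around these
     * calls.
     *
     *   const backend = nodeBackend();
     *   const handle = backend.summaryWriter('/tmp/tfjs_logs');
     *   backend.createSummaryFileWriter(handle, '/tmp/tfjs_logs');
     *   backend.writeScalarSummary(handle, 1, 'loss', 0.42);
     *   backend.flushSummaryWriter(handle);
     */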
|
    memory(): {
        unreliable: boolean;
    };
    time(f: () => void): Promise<BackendTimingInfo>;
    getNumOfSavedModels(): number;
}

export declare function nodeBackend(): NodeJSKernelBackend;
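/*
 * Usage sketch (illustrative): nodeBackend() returns the registered
 * NodeJSKernelBackend instance, which the op implementations in this package
 * use to reach the native binding. A typical guard-then-use pattern, assuming
 * the Node backend (registered as 'tensorflow') is already active:
 *
 *   ensureTensorflowBackend();
 *   const backend = nodeBackend();
 *   console.log(backend.isUsingGpuDevice);
 */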
|
export declare function getTFDType(dataType: tf.DataType): number;
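/*
 * Usage sketch (illustrative): getTFDType maps a TF.js DataType string to the
 * numeric TF_DataType enum value understood by the native binding, e.g.
 * getTFDType('float32') is expected to return the binding's TF_FLOAT constant
 * (treat the exact constant as an assumption).
 *
 *   const dtypeEnum = getTFDType('float32');
 */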
|
export declare function createTensorsTypeOpAttr(attrName: string, tensorsOrDtype: tf.Tensor | tf.Tensor[] | tf.DataType): TFEOpAttr;
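/*
 * Usage sketch (illustrative): createTensorsTypeOpAttr builds the dtype
 * attribute (conventionally named 'T') from either live tensors or a DataType
 * string, ready to pass to executeSingleOutput or executeMultipleOutputs.
 *
 *   const x = tf.tensor1d([1, 2, 3]);
 *   const fromTensor = createTensorsTypeOpAttr('T', x);
 *   const fromDtype = createTensorsTypeOpAttr('T', 'float32');
 */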
|
export declare function ensureTensorflowBackend(): void;
|