/**
 * @license
 * Copyright 2018 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
import * as tf from '@tensorflow/tfjs';
import { backend_util, BackendTimingInfo, DataId, DataType, KernelBackend, Rank, Scalar, ScalarLike, ShapeMap, Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, Tensor5D, TensorInfo } from '@tensorflow/tfjs';
import { TFEOpAttr, TFJSBinding } from './tfjs_binding';
export declare class NodeJSKernelBackend extends KernelBackend {
    binding: TFJSBinding;
    isGPUPackage: boolean;
    isUsingGpuDevice: boolean;
    private tensorMap;
    constructor(binding: TFJSBinding, packageName: string);
    private getDTypeInteger;
    private typeAttributeFromTensor;
    private createOutputTensor;
    private getInputTensorIds;
    private createReductionOpAttrs;
    private executeSingleInput;
    floatPrecision(): 16 | 32;
    epsilon(): number;
    /**
     * Executes a TensorFlow Eager Op that provides one output Tensor.
     * @param name The name of the Op to execute.
     * @param opAttrs The list of Op attributes required to execute.
     * @param inputs The list of input Tensors for the Op.
     * @return A resulting Tensor from Op execution.
     */
    executeSingleOutput(name: string, opAttrs: TFEOpAttr[], inputs: TensorInfo[]): Tensor;
    /**
     * Executes a TensorFlow Eager Op that provides multiple output Tensors.
     * @param name The name of the Op to execute.
     * @param opAttrs The list of Op attributes required to execute.
     * @param inputs The list of input Tensors for the Op.
     * @param numOutputs The number of output Tensors for Op execution.
     * @return A resulting Tensor array from Op execution.
     */
    executeMultipleOutputs(name: string, opAttrs: TFEOpAttr[], inputs: Tensor[], numOutputs: number): Tensor[];
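    // A minimal usage sketch (not part of the original declarations): driving the
    // two eager-execution entry points above directly. The op names ('Abs',
    // 'Unpack') and attribute names ('T', 'num', 'axis') follow the TensorFlow op
    // registry and are assumptions here; in normal use these kernels are reached
    // through the public tf.* ops rather than called by hand.
    //
    //   const backend = nodeBackend();
    //   const x = tf.tensor2d([[1, -2], [3, -4]]);
    //   // Single-output op: Abs takes one input and a 'T' type attribute.
    //   const abs = backend.executeSingleOutput(
    //       'Abs', [createTensorsTypeOpAttr('T', x)], [x]);
    //   // Multiple-output op: Unpack along axis 0 yields x.shape[0] tensors.
    //   const rows = backend.executeMultipleOutputs(
    //       'Unpack',
    //       [createTensorsTypeOpAttr('T', x),
    //        { name: 'num', type: backend.binding.TF_ATTR_INT, value: x.shape[0] },
    //        { name: 'axis', type: backend.binding.TF_ATTR_INT, value: 0 }],
    //       [x], x.shape[0]);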
    numDataIds(): number;
    dispose(): void;
    read(dataId: DataId): Promise<backend_util.BackendValues>;
    readSync(dataId: DataId): backend_util.BackendValues;
    disposeData(dataId: DataId): void;
    move(dataId: DataId, values: backend_util.BackendValues, shape: number[], dtype: DataType): void;
    write(values: backend_util.BackendValues, shape: number[], dtype: DataType): DataId;
    fill<R extends Rank>(shape: ShapeMap[R], value: number | string, dtype?: DataType): Tensor<R>;
    onesLike(x: Tensor): Tensor;
    zerosLike(x: Tensor): Tensor;
    stridedSlice<T extends Tensor>(x: T, begin: number[], end: number[], strides: number[]): T;
    unstack(x: Tensor, axis: number): Tensor[];
    batchMatMul(a: Tensor, b: Tensor, transposeA: boolean, transposeB: boolean): Tensor;
    private applyActivation;
    fusedConv2d({ input, filter, convInfo, bias, activation, preluActivationWeights }: backend_util.FusedConv2DConfig): Tensor4D;
    fusedBatchMatMul({ a, b, transposeA, transposeB, bias, activation, preluActivationWeights }: backend_util.FusedBatchMatMulConfig): Tensor3D;
    slice<T extends Tensor>(x: T, begin: number[], size: number[]): T;
    reverse<T extends Tensor>(a: T, axis: number[]): T;
    concat(tensors: Tensor[], axis: number): Tensor;
    neg<T extends Tensor>(a: T): T;
    diag(x: Tensor): Tensor;
    add(a: Tensor, b: Tensor): Tensor;
    select(condition: Tensor, a: Tensor, b: Tensor): Tensor;
    addN<T extends Tensor>(tensors: T[]): T;
    subtract(a: Tensor, b: Tensor): Tensor;
    multiply(a: Tensor, b: Tensor): Tensor;
    realDivide(a: Tensor, b: Tensor): Tensor;
    floorDiv(a: Tensor, b: Tensor): Tensor;
    divide(a: Tensor, b: Tensor): Tensor;
    divNoNan(a: Tensor, b: Tensor): Tensor;
    unsortedSegmentSum<T extends Tensor>(x: T, segmentIds: Tensor1D, numSegments: number): Tensor;
    sum(x: Tensor, axes: number[]): Tensor;
    prod(x: Tensor, axes: number[]): Tensor;
    argMin(x: Tensor, axis: number): Tensor;
    argMax(x: Tensor, axis: number): Tensor;
    equal(a: Tensor, b: Tensor): Tensor;
    notEqual(a: Tensor, b: Tensor): Tensor;
    less(a: Tensor, b: Tensor): Tensor;
    lessEqual(a: Tensor, b: Tensor): Tensor;
    greater(a: Tensor, b: Tensor): Tensor;
    greaterEqual(a: Tensor, b: Tensor): Tensor;
    logicalNot<T extends Tensor>(a: T): T;
    logicalAnd(a: Tensor, b: Tensor): Tensor;
    logicalOr(a: Tensor, b: Tensor): Tensor;
    where(condition: Tensor): Tensor2D;
    topKValues<T extends Tensor>(x: T, k: number): Tensor1D;
    topKIndices(x: Tensor, k: number): Tensor1D;
    topk<T extends Tensor>(x: T, k?: number, sorted?: boolean): [T, T];
    min(x: Tensor, axes: number[]): Tensor;
    minimum(a: Tensor, b: Tensor): Tensor;
    max(x: Tensor, axes: number[]): Tensor;
    maximum(a: Tensor, b: Tensor): Tensor;
    all(x: Tensor, axes: number[]): Tensor;
    any(x: Tensor, axes: number[]): Tensor;
    ceil<T extends Tensor>(x: T): T;
    floor<T extends Tensor>(x: T): T;
    pow<T extends Tensor>(a: T, b: Tensor): T;
    exp<T extends Tensor>(x: T): T;
    log<T extends Tensor>(x: T): T;
    log1p<T extends Tensor>(x: T): T;
    sqrt<T extends Tensor>(x: T): T;
    square<T extends Tensor>(x: T): T;
    relu<T extends Tensor>(x: T): T;
    relu6<T extends Tensor>(x: T): T;
    prelu<T extends Tensor>(x: T, a: T): T;
    elu<T extends Tensor>(x: T): T;
    eluDer<T extends Tensor>(dy: T, y: T): T;
    selu<T extends Tensor>(x: T): T;
    int<T extends Tensor>(x: T): T;
    clip<T extends Tensor>(x: T, min: number, max: number): T;
    abs<T extends Tensor>(x: T): T;
    complexAbs<T extends Tensor>(x: T): T;
    sigmoid<T extends Tensor>(x: T): T;
    sin<T extends Tensor>(x: T): T;
    cos<T extends Tensor>(x: T): T;
    tan<T extends Tensor>(x: T): T;
    asin<T extends Tensor>(x: T): T;
    acos<T extends Tensor>(x: T): T;
    atan<T extends Tensor>(x: T): T;
    sinh<T extends Tensor>(x: T): T;
    cosh<T extends Tensor>(x: T): T;
    tanh<T extends Tensor>(x: T): T;
    mod(a: Tensor, b: Tensor): Tensor;
    round<T extends Tensor>(x: T): T;
    sign<T extends Tensor>(x: T): T;
    isNaN<T extends Tensor>(x: T): T;
    isInf<T extends Tensor>(x: T): T;
    isFinite<T extends Tensor>(x: T): T;
    rsqrt<T extends Tensor>(x: T): T;
    reciprocal<T extends Tensor>(x: T): T;
    asinh<T extends Tensor>(x: T): T;
    acosh<T extends Tensor>(x: T): T;
    atanh<T extends Tensor>(x: T): T;
    erf<T extends Tensor>(x: T): T;
    squaredDifference(a: Tensor, b: Tensor): Tensor;
    expm1<T extends Tensor>(x: T): T;
    softplus<T extends Tensor>(x: T): T;
    atan2<T extends Tensor>(a: T, b: T): T;
    step<T extends Tensor>(x: T, alpha: number): T;
    conv2d(x: Tensor4D, filter: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    conv2dDerInput(dy: Tensor4D, filter: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    conv2dDerFilter(x: Tensor4D, dy: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    depthwiseConv2DDerInput(dy: Tensor4D, filter: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    depthwiseConv2DDerFilter(x: Tensor4D, dY: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    fusedDepthwiseConv2D({ input, filter, convInfo, bias, activation, preluActivationWeights }: backend_util.FusedConv2DConfig): Tensor4D;
    depthwiseConv2D(input: Tensor4D, filter: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    conv3d(x: Tensor, filter: Tensor, convInfo: backend_util.Conv3DInfo): Tensor;
    conv3dDerInput(dy: Tensor, filter: Tensor, convInfo: backend_util.Conv3DInfo): Tensor;
    conv3dDerFilter(x: Tensor, dY: Tensor, convInfo: backend_util.Conv3DInfo): Tensor;
    maxPool(x: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    maxPoolBackprop(dy: Tensor4D, x: Tensor4D, y: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    avgPool(x: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    avgPoolBackprop(dy: Tensor4D, x: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D;
    avgPool3d(x: Tensor5D, convInfo: backend_util.Conv3DInfo): Tensor5D;
    avgPool3dBackprop(dy: Tensor5D, x: Tensor5D, convInfo: backend_util.Conv3DInfo): Tensor5D;
    maxPool3d(x: Tensor5D, convInfo: backend_util.Conv3DInfo): Tensor5D;
    maxPool3dBackprop(dy: Tensor5D, x: Tensor5D, y: Tensor5D, convInfo: backend_util.Conv3DInfo): Tensor5D;
    reshape<T extends Tensor, R extends Rank>(x: T, shape: ShapeMap[R]): Tensor<R>;
    cast<T extends Tensor>(x: T, dtype: DataType): T;
    tile<T extends Tensor>(x: T, reps: number[]): T;
    pad<T extends Tensor>(x: T, paddings: Array<[number, number]>, constantValue: number): T;
    transpose<T extends Tensor>(x: T, perm: number[]): T;
    gather<T extends Tensor>(x: T, indices: Tensor1D, axis: number): T;
    gatherND(x: Tensor, indices: Tensor): Tensor;
    scatterND<R extends Rank>(indices: Tensor, updates: Tensor, shape: ShapeMap[R]): Tensor<R>;
    batchToSpaceND<T extends Tensor>(x: T, blockShape: number[], crops: number[][]): T;
    spaceToBatchND<T extends Tensor>(x: T, blockShape: number[], paddings: number[][]): T;
    resizeBilinear(x: Tensor4D, newHeight: number, newWidth: number, alignCorners: boolean): Tensor4D;
    resizeBilinearBackprop(dy: Tensor4D, x: Tensor4D, alignCorners: boolean): Tensor4D;
    resizeNearestNeighbor(x: Tensor4D, newHeight: number, newWidth: number, alignCorners: boolean): Tensor4D;
    resizeNearestNeighborBackprop(dy: Tensor4D, x: Tensor4D, alignCorners: boolean): Tensor4D;
    batchNorm(x: Tensor4D, mean: Tensor4D | Tensor1D, variance: Tensor4D | Tensor1D, offset?: Tensor4D | Tensor1D, scale?: Tensor4D | Tensor1D, varianceEpsilon?: number): Tensor4D;
    localResponseNormalization4D(x: Tensor4D, radius: number, bias: number, alpha: number, beta: number): Tensor4D;
    LRNGrad(dy: Tensor4D, inputImage: Tensor4D, outputImage: Tensor4D, radius: number, bias: number, alpha: number, beta: number): Tensor4D;
    multinomial(logits: Tensor2D, normalized: boolean, numSamples: number, seed: number): Tensor2D;
    oneHot(indices: Tensor1D, depth: number, onValue: number, offValue: number): Tensor2D;
    cumsum(x: Tensor, axis: number, exclusive: boolean, reverse: boolean): Tensor;
    nonMaxSuppression(boxes: Tensor2D, scores: Tensor1D, maxOutputSize: number, iouThreshold?: number, scoreThreshold?: number): Tensor1D;
    fft(x: Tensor): Tensor;
    ifft(x: Tensor2D): Tensor2D;
    complex<T extends Tensor>(real: T, imag: T): T;
    real<T extends Tensor>(input: T): T;
    imag<T extends Tensor>(input: T): T;
    cropAndResize(image: Tensor, boxes: Tensor, boxIndex: Tensor, cropSize: [number, number], method: 'bilinear' | 'nearest', extrapolationValue: number): Tensor;
    depthToSpace(x: Tensor, blockSize: number, dataFormat: string): Tensor;
    split<T extends Tensor>(value: T, sizeSplits: number[], axis: number): T[];
    sparseToDense<R extends Rank>(sparseIndices: Tensor, sparseValues: Tensor, outputShape: ShapeMap[R], defaultValue: Tensor): Tensor<R>;
    linspace(start: number, stop: number, num: number): Tensor1D;
    decodeJpeg(contents: Uint8Array, channels: number, ratio: number, fancyUpscaling: boolean, tryRecoverTruncated: boolean, acceptableFraction: number, dctMethod: string): Tensor3D;
    decodePng(contents: Uint8Array, channels: number): Tensor3D;
    decodeBmp(contents: Uint8Array, channels: number): Tensor3D;
    decodeGif(contents: Uint8Array): Tensor4D;
    executeEncodeImageOp(name: string, opAttrs: TFEOpAttr[], imageData: Uint8Array, imageShape: number[]): Tensor;
    encodeJpeg(imageData: Uint8Array, imageShape: number[], format: '' | 'grayscale' | 'rgb', quality: number, progressive: boolean, optimizeSize: boolean, chromaDownsampling: boolean, densityUnit: 'in' | 'cm', xDensity: number, yDensity: number, xmpMetadata: string): Tensor;
    encodePng(imageData: Uint8Array, imageShape: number[], compression: number): Tensor;
    deleteSavedModel(id: number): void;
    loadSavedModelMetaGraph(path: string, tags: string): number;
    runSavedModel(id: number, inputs: Tensor[], inputOpNames: string[], outputOpNames: string[]): Tensor[];
    summaryWriter(logdir: string): Tensor1D;
    createSummaryFileWriter(resourceHandle: Tensor, logdir: string, maxQueue?: number, flushMillis?: number, filenameSuffix?: string): void;
    writeScalarSummary(resourceHandle: Tensor, step: number, name: string, value: Scalar | number): void;
    flushSummaryWriter(resourceHandle: Tensor): void;
    memory(): {
        unreliable: boolean;
    };
    time(f: () => void): Promise<BackendTimingInfo>;
    getNumOfSavedModels(): number;
}
/** Returns an instance of the Node.js backend. */
export declare function nodeBackend(): NodeJSKernelBackend;
/** Returns the TF dtype for a given DataType. */
export declare function getTFDType(dataType: tf.DataType): number;
/**
 * Creates a TFEOpAttr for a 'type' OpDef attribute from a Tensor or list of
 * Tensors.
 */
export declare function createTensorsTypeOpAttr(attrName: string, tensorsOrDtype: tf.Tensor | tf.Tensor[] | tf.DataType): TFEOpAttr;
export declare function createOpAttr(attrName: string, tensorsOrDtype: tf.Tensor | tf.Tensor[] | tf.DataType, value: ScalarLike): TFEOpAttr;
export declare function ensureTensorflowBackend(): void;
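// A usage sketch for the exported helpers (not part of the original declarations).
// It assumes '@tensorflow/tfjs-node' has been loaded so the Node backend is
// registered and active; the printed values are illustrative only.
//
//   import '@tensorflow/tfjs-node';
//
//   ensureTensorflowBackend();          // asserts the active backend is the Node backend
//   const backend = nodeBackend();      // the NodeJSKernelBackend instance behind tf.* ops
//   console.log(backend.isUsingGpuDevice, backend.floatPrecision());
//
//   // Numeric TensorFlow dtype code for a tfjs DataType, and a 'type' op
//   // attribute built from that dtype for use with executeSingleOutput().
//   const tfDtype = getTFDType('float32');
//   const typeAttr = createTensorsTypeOpAttr('T', 'float32');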