1 | /**
|
2 | * @license
|
3 | * Copyright 2019 Google LLC. All Rights Reserved.
|
4 | * Licensed under the Apache License, Version 2.0 (the "License");
|
5 | * you may not use this file except in compliance with the License.
|
6 | * You may obtain a copy of the License at
|
7 | *
|
8 | * http://www.apache.org/licenses/LICENSE-2.0
|
9 | *
|
10 | * Unless required by applicable law or agreed to in writing, software
|
11 | * distributed under the License is distributed on an "AS IS" BASIS,
|
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 | * See the License for the specific language governing permissions and
|
14 | * limitations under the License.
|
15 | * =============================================================================
|
16 | */
|
17 | import { InferenceModel, MetaGraph, ModelPredictConfig, ModelTensorInfo, NamedTensorMap, Tensor } from '@tensorflow/tfjs';
|
18 | import { NodeJSKernelBackend } from './nodejs_kernel_backend';
|
/**
 * Get a key in an object by its value. This is used to get protobuf enum value
 * from index.
 *
 * @param object The object (typically a protobuf enum mapping) to search.
 * @param value The numeric value whose corresponding key should be returned.
 * @returns The key in `object` whose property value equals `value`.
 */
export declare function getEnumKeyFromValue(object: any, value: number): string;
|
/**
 * Read SavedModel proto message from path.
 *
 * @param path Path to SavedModel folder.
 * @returns Promise resolving to the decoded SavedModel protobuf message.
 */
export declare function readSavedModelProto(path: string): Promise<any>;
|
/**
 * Inspect the MetaGraphs of the SavedModel from the provided path. This
 * function will return an array of `MetaGraphInfo` objects.
 *
 * @param path Path to SavedModel folder.
 * @returns Promise resolving to an array of `MetaGraph` objects, one per
 *     MetaGraph found in the SavedModel.
 *
 * @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
 */
export declare function getMetaGraphsFromSavedModel(path: string): Promise<MetaGraph[]>;
|
/**
 * Get input and output node names from SavedModel metagraphs info. The
 * input/output node names will be used when executing a SavedModel signature.
 *
 * @param savedModelInfo The MetaGraphInfo array loaded through
 *     getMetaGraphsFromSavedModel().
 * @param tags The tags of the MetaGraph to get input/output node names from.
 * @param signature The signature to get input/output node names from.
 * @returns A pair of maps: the first maps input tensor names to node names,
 *     the second maps output tensor names to node names.
 */
export declare function getInputAndOutputNodeNameFromMetaGraphInfo(savedModelInfo: MetaGraph[], tags: string[], signature: string): {
    [key: string]: string;
}[];
|
/**
 * A `tf.TFSavedModel` is a signature loaded from a SavedModel
 * metagraph, and allows inference execution.
 *
 * @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
 */
export declare class TFSavedModel implements InferenceModel {
    // Handle of the C++ session this model executes through; a session may be
    // shared by multiple TFSavedModel instances (see dispose()).
    private sessionId;
    // JavaScript-side id of this loaded model instance.
    private jsid;
    // Maps input tensor names (from the signatureDef) to graph node names.
    private inputNodeNames;
    // Maps output tensor names (from the signatureDef) to graph node names.
    private outputNodeNames;
    // Node.js kernel backend used to run the session.
    private backend;
    // True once dispose() has been called; guards against reuse after disposal.
    private disposed;
    constructor(sessionId: number, jsid: number, inputNodeNames: {
        [key: string]: string;
    }, outputNodeNames: {
        [key: string]: string;
    }, backend: NodeJSKernelBackend);
    /**
     * Return the array of input tensor info.
     *
     * @doc {heading: 'Models', subheading: 'SavedModel'}
     */
    readonly inputs: ModelTensorInfo[];
    /**
     * Return the array of output tensor info.
     *
     * @doc {heading: 'Models', subheading: 'SavedModel'}
     */
    readonly outputs: ModelTensorInfo[];
    /**
     * Delete the SavedModel from nodeBackend and delete corresponding session in
     * the C++ backend if the session is only used by this TFSavedModel.
     *
     * @doc {heading: 'Models', subheading: 'SavedModel'}
     */
    dispose(): void;
    /**
     * Execute the inference for the input tensors.
     *
     * @param input The input tensors, when there is single input for the model,
     * inputs param should be a Tensor. For models with multiple inputs, inputs
     * params should be in either Tensor[] if the input order is fixed, or
     * otherwise NamedTensorMap format. The keys in the NamedTensorMap are the
     * name of input tensors in SavedModel signatureDef. It can be found through
     * `tf.node.getMetaGraphsFromSavedModel()`.
     *
     * For batch inference execution, the tensors for each input need to be
     * concatenated together. For example with mobilenet, the required input shape
     * is [1, 244, 244, 3], which represents the [batch, height, width, channel].
     * If we provide batched data of 100 images, the input tensor should be
     * in the shape of [100, 244, 244, 3].
     *
     * @param config Prediction configuration for specifying the batch size.
     *
     * @returns Inference result tensors. The output would be single Tensor if
     * model has single output node, otherwise Tensor[] or NamedTensorMap[] will
     * be returned for model with multiple outputs.
     *
     * @doc {heading: 'Models', subheading: 'SavedModel'}
     */
    predict(inputs: Tensor | Tensor[] | NamedTensorMap, config?: ModelPredictConfig): Tensor | Tensor[] | NamedTensorMap;
    /**
     * Execute the inference for the input tensors and return activation
     * values for specified output node names without batching.
     *
     * @param input The input tensors, when there is single input for the model,
     * inputs param should be a Tensor. For models with multiple inputs, inputs
     * params should be in either Tensor[] if the input order is fixed, or
     * otherwise NamedTensorMap format.
     *
     * @param outputs string|string[]. List of output node names to retrieve
     * activation from.
     *
     * @returns Activation values for the output nodes result tensors. The return
     * type matches specified parameter outputs type. The output would be single
     * Tensor if single output is specified, otherwise Tensor[] for multiple
     * outputs.
     *
     * @doc {heading: 'Models', subheading: 'SavedModel'}
     */
    execute(inputs: Tensor | Tensor[] | NamedTensorMap, outputs: string | string[]): Tensor | Tensor[];
}
|
/**
 * Load a TensorFlow SavedModel from disk. TensorFlow SavedModel is different
 * from TensorFlow.js model format. A SavedModel is a directory containing
 * serialized signatures and the states needed to run them. The directory has a
 * saved_model.pb (or saved_model.pbtxt) file storing the actual TensorFlow
 * program, or model, and a set of named signatures, each identifying a
 * function. The directory also has a variables directory that contains a
 * standard training checkpoint. The directory may also have an assets
 * directory containing files used by the TensorFlow graph, for example text
 * files used to initialize vocabulary tables. These are supported datatypes:
 * float32, int32, complex64, string. For more information, see this guide:
 * https://www.tensorflow.org/guide/saved_model.
 *
 * @param path The path to the SavedModel.
 * @param tags The tags of the MetaGraph to load. The available tags of a
 *     SavedModel can be retrieved through tf.node.getMetaGraphsFromSavedModel()
 *     API. Defaults to ['serve'].
 * @param signature The name of the SignatureDef to load. The available
 *     SignatureDefs of a SavedModel can be retrieved through
 *     tf.node.getMetaGraphsFromSavedModel() API. Defaults to 'serving_default'.
 * @returns Promise resolving to a `TFSavedModel` ready for inference.
 *
 * @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
 */
export declare function loadSavedModel(path: string, tags?: string[], signature?: string): Promise<TFSavedModel>;
|
/**
 * Return the number of SavedModels currently loaded.
 * NOTE(review): exact counting semantics (per-session vs. per-TFSavedModel
 * instance) are not visible from this declaration — confirm against the
 * implementation.
 */
export declare function getNumOfSavedModels(): number;
|