1 | /**
|
2 | * @license
|
3 | * Copyright 2019 Google Inc. All Rights Reserved.
|
4 | * Licensed under the Apache License, Version 2.0 (the "License");
|
5 | * you may not use this file except in compliance with the License.
|
6 | * You may obtain a copy of the License at
|
7 | *
|
8 | * http://www.apache.org/licenses/LICENSE-2.0
|
9 | *
|
10 | * Unless required by applicable law or agreed to in writing, software
|
11 | * distributed under the License is distributed on an "AS IS" BASIS,
|
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 | * See the License for the specific language governing permissions and
|
14 | * limitations under the License.
|
15 | * =============================================================================
|
16 | */
|
17 | import { InferenceModel, MetaGraph, ModelPredictConfig, ModelTensorInfo, NamedTensorMap, Tensor } from '@tensorflow/tfjs';
|
18 | import { NodeJSKernelBackend } from './nodejs_kernel_backend';
|
/**
 * Get a key in an object by its value. This is used to get a protobuf enum
 * key (name) from its numeric index value.
 *
 * @param object The object to search — typically a protobuf enum mapping of
 *     string keys to numeric values.
 * @param value The numeric value whose corresponding key should be returned.
 * @returns The key in `object` whose value equals `value`.
 */
export declare function getEnumKeyFromValue(object: any, value: number): string;
|
/**
 * Read the SavedModel proto message from a SavedModel directory.
 *
 * @param path Path to SavedModel folder.
 * @returns Promise resolving to the parsed SavedModel protobuf message
 *     (untyped, hence `any`).
 */
export declare function readSavedModelProto(path: string): Promise<any>;
|
/**
 * Inspect the MetaGraphs of the SavedModel from the provided path. This
 * function will return an array of `MetaGraphInfo` objects.
 *
 * @param path Path to SavedModel folder.
 * @returns Promise resolving to the MetaGraph descriptions (tags, signatures,
 *     and their input/output tensor info) found in the SavedModel.
 */
/**
 * @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
 */
export declare function getMetaGraphsFromSavedModel(path: string): Promise<MetaGraph[]>;
|
/**
 * Get input and output node names from SavedModel metagraphs info. The
 * input/output node names will be used when executing a SavedModel signature.
 *
 * @param savedModelInfo The MetaGraphInfo array loaded through
 *     getMetaGraphsFromSavedModel().
 * @param tags The tags of the MetaGraph to get input/output node names from.
 * @param signature The signature to get input/output node names from.
 * @returns A two-element array: maps of signature input names to input node
 *     names, and signature output names to output node names.
 */
export declare function getInputAndOutputNodeNameFromMetaGraphInfo(savedModelInfo: MetaGraph[], tags: string[], signature: string): {
    [key: string]: string;
}[];
|
/**
 * A `tf.TFSavedModel` is a signature loaded from a SavedModel
 * metagraph, and allows inference execution.
 */
/**
 * @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
 */
export declare class TFSavedModel implements InferenceModel {
    // Id of the C++ session this model runs against. A session may be shared
    // by several TFSavedModel instances (see dispose()).
    private sessionId;
    // JavaScript-side id identifying this loaded model instance.
    private jsid;
    // Map from signature input names to graph input node names.
    private inputNodeNames;
    // Map from signature output names to graph output node names.
    private outputNodeNames;
    // Node.js kernel backend used to execute the session.
    private backend;
    // Set once dispose() has been called — presumably guards further use;
    // confirm against the implementation.
    private disposed;
    /**
     * @param sessionId Id of the C++ session backing this model.
     * @param jsid JavaScript-side id of this loaded model.
     * @param inputNodeNames Signature input name -> graph node name map.
     * @param outputNodeNames Signature output name -> graph node name map.
     * @param backend The NodeJSKernelBackend that executes the session.
     */
    constructor(sessionId: number, jsid: number, inputNodeNames: {
        [key: string]: string;
    }, outputNodeNames: {
        [key: string]: string;
    }, backend: NodeJSKernelBackend);
    /**
     * Return the array of input tensor info.
     */
    /** @doc {heading: 'Models', subheading: 'SavedModel'} */
    readonly inputs: ModelTensorInfo[];
    /**
     * Return the array of output tensor info.
     */
    /** @doc {heading: 'Models', subheading: 'SavedModel'} */
    readonly outputs: ModelTensorInfo[];
    /**
     * Delete the SavedModel from nodeBackend and delete the corresponding
     * session in the C++ backend if the session is only used by this
     * TFSavedModel.
     */
    /** @doc {heading: 'Models', subheading: 'SavedModel'} */
    dispose(): void;
    /**
     * Execute the inference for the input tensors.
     *
     * @param inputs The input tensors. When there is a single input for the
     * model, the inputs param should be a Tensor. For models with multiple
     * inputs, the inputs param should be either a Tensor[] if the input order
     * is fixed, or otherwise a NamedTensorMap. The keys in the NamedTensorMap
     * are the names of the input tensors in the SavedModel signatureDef, and
     * can be found through `tf.node.getMetaGraphsFromSavedModel()`.
     *
     * For batch inference execution, the tensors for each input need to be
     * concatenated together. For example with mobilenet, the required input
     * shape is [1, 224, 224, 3], which represents [batch, height, width,
     * channel]. If we provide batched data of 100 images, the input tensor
     * should be in the shape of [100, 224, 224, 3].
     *
     * @param config Prediction configuration for specifying the batch size.
     *
     * @returns Inference result tensors. The output is a single Tensor if the
     * model has a single output node, otherwise a Tensor[] or NamedTensorMap[]
     * is returned for a model with multiple outputs.
     */
    /** @doc {heading: 'Models', subheading: 'SavedModel'} */
    predict(inputs: Tensor | Tensor[] | NamedTensorMap, config?: ModelPredictConfig): Tensor | Tensor[] | NamedTensorMap;
    /**
     * Execute the inference for the input tensors and return activation
     * values for specified output node names without batching.
     *
     * @param inputs The input tensors. When there is a single input for the
     * model, the inputs param should be a Tensor. For models with multiple
     * inputs, the inputs param should be either a Tensor[] if the input order
     * is fixed, or otherwise a NamedTensorMap.
     *
     * @param outputs string|string[]. List of output node names to retrieve
     * activation from.
     *
     * @returns Activation values for the output node result tensors. The
     * return type matches the specified parameter outputs type: a single
     * Tensor if a single output is specified, otherwise Tensor[] for multiple
     * outputs.
     */
    /** @doc {heading: 'Models', subheading: 'SavedModel'} */
    execute(inputs: Tensor | Tensor[] | NamedTensorMap, outputs: string | string[]): Tensor | Tensor[];
}
|
/**
 * Load a TensorFlow SavedModel from disk. TensorFlow SavedModel is different
 * from the TensorFlow.js model format. A SavedModel is a directory containing
 * serialized signatures and the states needed to run them. The directory has a
 * saved_model.pb (or saved_model.pbtxt) file storing the actual TensorFlow
 * program, or model, and a set of named signatures, each identifying a
 * function. The directory also has a variables directory containing a standard
 * training checkpoint. The directory may also have an assets directory
 * containing files used by the TensorFlow graph, for example text files used
 * to initialize vocabulary tables. The supported datatypes are: float32,
 * int32, complex64, string. For more information, see this guide:
 * https://www.tensorflow.org/guide/saved_model.
 *
 * @param path The path to the SavedModel.
 * @param tags The tags of the MetaGraph to load. The available tags of a
 *     SavedModel can be retrieved through tf.node.getMetaGraphsFromSavedModel()
 *     API. Defaults to ['serve'].
 * @param signature The name of the SignatureDef to load. The available
 *     SignatureDefs of a SavedModel can be retrieved through
 *     tf.node.getMetaGraphsFromSavedModel() API. Defaults to 'serving_default'.
 * @returns Promise resolving to a TFSavedModel that can run inference for the
 *     chosen signature.
 */
/** @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'} */
export declare function loadSavedModel(path: string, tags?: string[], signature?: string): Promise<TFSavedModel>;
|
/**
 * Get the number of SavedModels currently loaded — presumably the count of
 * live (undisposed) sessions tracked by the Node.js backend; confirm against
 * the implementation. Useful for verifying that dispose() released a model.
 */
export declare function getNumOfSavedModels(): number;
|