/// <amd-module name="@tensorflow/tfjs-layers/dist/exports_initializers" />
/**
 * @license
 * Copyright 2018 Google LLC
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 * =============================================================================
 */
import { ConstantArgs, IdentityArgs, Initializer, OrthogonalArgs, RandomNormalArgs, RandomUniformArgs, SeedOnlyInitializerArgs, TruncatedNormalArgs, VarianceScalingArgs, Zeros } from './initializers';
/**
 * Initializer that generates tensors initialized to 0.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function zeros(): Zeros;
/**
 * Initializer that generates tensors initialized to 1.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function ones(): Initializer;
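// Usage sketch for `zeros` and `ones` (illustrative only, not part of the
// generated declarations; it assumes these factories are also reachable
// through the public `tf.initializers` namespace of the @tensorflow/tfjs
// umbrella package):
import * as tf from '@tensorflow/tfjs';

// An `Initializer` can be materialized directly via `apply(shape)`.
const zeroTensor = tf.initializers.zeros().apply([2, 2]); // all 0s
const oneTensor = tf.initializers.ones().apply([2, 2]);   // all 1s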
/**
 * Initializer that generates values initialized to some constant.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function constant(args: ConstantArgs): Initializer;
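// Usage sketch for `constant` (same illustrative `tf` import as above):
// `ConstantArgs.value` sets every element to a single scalar.
const constInit = tf.initializers.constant({ value: 0.1 });
const denseWithConstBias = tf.layers.dense({
    units: 8,
    inputShape: [16],
    biasInitializer: constInit,
});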
/**
 * Initializer that generates random values initialized to a uniform
 * distribution.
 *
 * Values will be distributed uniformly between the configured minval and
 * maxval.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function randomUniform(args: RandomUniformArgs): Initializer;
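// Usage sketch for `randomUniform` (same illustrative `tf` import as above):
// samples fall in [minval, maxval]; `seed` makes the draw reproducible.
const uniformInit = tf.initializers.randomUniform({
    minval: -0.05,
    maxval: 0.05,
    seed: 42,
});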
/**
 * Initializer that generates random values initialized to a normal
 * distribution.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function randomNormal(args: RandomNormalArgs): Initializer;
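// Usage sketch for `randomNormal` (same illustrative `tf` import as above):
// `mean` and `stddev` parameterize the Gaussian being sampled.
const normalInit = tf.initializers.randomNormal({ mean: 0, stddev: 0.05, seed: 42 });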
/**
 * Initializer that generates random values initialized to a truncated normal
 * distribution.
 *
 * These values are similar to values from a `RandomNormal` except that values
 * more than two standard deviations from the mean are discarded and re-drawn.
 * This is the recommended initializer for neural network weights and filters.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function truncatedNormal(args: TruncatedNormalArgs): Initializer;
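// Usage sketch for `truncatedNormal` (same illustrative `tf` import as
// above): it takes the same arguments as `randomNormal`, but outliers beyond
// two standard deviations are re-drawn.
const truncInit = tf.initializers.truncatedNormal({ mean: 0, stddev: 0.05, seed: 42 });
const hidden = tf.layers.dense({ units: 32, kernelInitializer: truncInit });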
/**
 * Initializer that generates the identity matrix.
 * Only use for square 2D matrices.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function identity(args: IdentityArgs): Initializer;
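// Usage sketch for `identity` (same illustrative `tf` import as above):
// valid only for square 2D shapes; `gain` scales the identity matrix.
const identityInit = tf.initializers.identity({ gain: 1.0 });
const eye = identityInit.apply([3, 3]); // 3x3 identity matrix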
/**
 * Initializer capable of adapting its scale to the shape of weights.
 *
 * With distribution=NORMAL, samples are drawn from a truncated normal
 * distribution centered on zero, with `stddev = sqrt(scale / n)`, where n is:
 * - number of input units in the weight tensor, if mode = FAN_IN.
 * - number of output units, if mode = FAN_OUT.
 * - average of the numbers of input and output units, if mode = FAN_AVG.
 *
 * With distribution=UNIFORM, samples are drawn from a uniform distribution
 * within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function varianceScaling(config: VarianceScalingArgs): Initializer;
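// Usage sketch for `varianceScaling` (same illustrative `tf` import as
// above). With scale = 2, fan-in mode, and a normal distribution, this
// roughly reproduces the He initialization scheme declared further below.
const scaledInit = tf.initializers.varianceScaling({
    scale: 2.0,
    mode: 'fanIn',
    distribution: 'normal',
    seed: 42,
});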
/**
 * Glorot uniform initializer, also called Xavier uniform initializer.
 * It draws samples from a uniform distribution within [-limit, limit],
 * where `limit` is `sqrt(6 / (fan_in + fan_out))`, with `fan_in` the number
 * of input units in the weight tensor and `fan_out` the number of output
 * units in the weight tensor.
 *
 * Reference:
 * Glorot & Bengio, AISTATS 2010
 * http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function glorotUniform(args: SeedOnlyInitializerArgs): Initializer;
/**
 * Glorot normal initializer, also called Xavier normal initializer.
 * It draws samples from a truncated normal distribution centered on 0
 * with `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` is the number
 * of input units in the weight tensor and `fan_out` is the number of output
 * units in the weight tensor.
 *
 * Reference:
 * Glorot & Bengio, AISTATS 2010
 * http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function glorotNormal(args: SeedOnlyInitializerArgs): Initializer;
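// Usage sketch for the Glorot variants (same illustrative `tf` import as
// above): both take only an optional `seed`, since fan-in and fan-out are
// derived from the shape of the weight being initialized.
const glorotInit = tf.initializers.glorotUniform({ seed: 42 });
const tanhLayer = tf.layers.dense({
    units: 16,
    activation: 'tanh',
    kernelInitializer: glorotInit,
});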
/**
 * He normal initializer.
 *
 * It draws samples from a truncated normal distribution centered on 0
 * with `stddev = sqrt(2 / fanIn)`,
 * where `fanIn` is the number of input units in the weight tensor.
 *
 * Reference:
 * He et al., http://arxiv.org/abs/1502.01852
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function heNormal(args: SeedOnlyInitializerArgs): Initializer;
/**
 * He uniform initializer.
 *
 * It draws samples from a uniform distribution within [-limit, limit],
 * where `limit` is `sqrt(6 / fanIn)`
 * and `fanIn` is the number of input units in the weight tensor.
 *
 * Reference:
 * He et al., http://arxiv.org/abs/1502.01852
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function heUniform(args: SeedOnlyInitializerArgs): Initializer;
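// Usage sketch for the He variants (same illustrative `tf` import as above):
// a common pairing with ReLU activations, since the scheme compensates for
// fan-in only.
const reluLayer = tf.layers.dense({
    units: 64,
    activation: 'relu',
    kernelInitializer: tf.initializers.heNormal({ seed: 42 }),
});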
/**
 * LeCun normal initializer.
 *
 * It draws samples from a truncated normal distribution centered on 0
 * with `stddev = sqrt(1 / fanIn)`,
 * where `fanIn` is the number of input units in the weight tensor.
 *
 * References:
 * [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
 * [Efficient Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function leCunNormal(args: SeedOnlyInitializerArgs): Initializer;
/**
 * LeCun uniform initializer.
 *
 * It draws samples from a uniform distribution in the interval
 * `[-limit, limit]` with `limit = sqrt(3 / fanIn)`,
 * where `fanIn` is the number of input units in the weight tensor.
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function leCunUniform(args: SeedOnlyInitializerArgs): Initializer;
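// Usage sketch for the LeCun variants (same illustrative `tf` import as
// above): LeCun normal is the initialization used with SELU activations in
// self-normalizing networks (see the reference above).
const seluLayer = tf.layers.dense({
    units: 64,
    activation: 'selu',
    kernelInitializer: tf.initializers.leCunNormal({ seed: 42 }),
});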
/**
 * Initializer that generates a random orthogonal matrix.
 *
 * Reference:
 * [Saxe et al., http://arxiv.org/abs/1312.6120](http://arxiv.org/abs/1312.6120)
 *
 * @doc {heading: 'Initializers', namespace: 'initializers'}
 */
export declare function orthogonal(args: OrthogonalArgs): Initializer;
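// Usage sketch for `orthogonal` (same illustrative `tf` import as above).
// It assumes `OrthogonalArgs` adds an optional multiplicative `gain` on top
// of the seed-only arguments, as in the published tfjs API. Orthogonal
// kernels are a common choice for recurrent weights.
const rnn = tf.layers.simpleRNN({
    units: 32,
    recurrentInitializer: tf.initializers.orthogonal({ gain: 1.0, seed: 42 }),
});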