/**
 * @license
 * Copyright 2022 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
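// ---------------------------------------------------------------------------
// What follows is the minified UMD build of @tensorflow/tfjs-layers (it
// serializes models with kerasVersion "tfjs-layers 3.20.0"; see
// updatedConfig() below). The UMD wrapper resolves @tensorflow/tfjs-core and,
// in a browser, merges the layers exports into the global `tf` object. A
// minimal usage sketch; the package names are real, the rest is illustrative:
//
//   // Node / bundler (the CommonJS branch of the wrapper):
//   const tfl = require('@tensorflow/tfjs-layers'); // pulls in tfjs-core too
//   const x = tfl.input({shape: [4]});
//   const y = tfl.layers.dense({units: 1}).apply(x);
//   const model = tfl.model({inputs: x, outputs: y});
//
//   // Browser: load tfjs-core first, then this file, and use the global `tf`.
// ---------------------------------------------------------------------------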
!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("@tensorflow/tfjs-core")):"function"==typeof define&&define.amd?define(["exports","@tensorflow/tfjs-core"],e):e((t="undefined"!=typeof globalThis?globalThis:t||self).tf=t.tf||{},t.tf)}(this,(function(t,e){"use strict";function s(t){if(t&&t.__esModule)return t;var e=Object.create(null);return t&&Object.keys(t).forEach((function(s){if("default"!==s){var i=Object.getOwnPropertyDescriptor(t,s);Object.defineProperty(e,s,i.get?i:{enumerable:!0,get:function(){return t[s]}})}})),e.default=t,e}var i=s(e);class n extends Error{constructor(t){super(t),Object.setPrototypeOf(this,n.prototype)}}class r extends Error{constructor(t){super(t),Object.setPrototypeOf(this,r.prototype)}}class a extends Error{constructor(t){super(t),Object.setPrototypeOf(this,a.prototype)}}class o extends Error{constructor(t){super(t),Object.setPrototypeOf(this,o.prototype)}}class l extends Error{constructor(t){super(t),Object.setPrototypeOf(this,l.prototype)}}class u{constructor(t){this.maxEntries=t||100,this.cache=new Map}get(t){let e;return this.cache.has(t)&&(e=this.cache.get(t),this.cache.delete(t),this.cache.set(t,e)),e}put(t,e){if(this.cache.has(t))this.cache.delete(t);else if(this.cache.size>=this.maxEntries){const t=this.cache.keys().next().value;this.cache.delete(t)}this.cache.set(t,e)}getMaxEntries(){return this.maxEntries}setMaxEntries(t){if(t<0)throw new Error(`The maxEntries of LRU caches must be at least 0, but got ${t}.`);if(this.maxEntries>t)for(let e=0;e<this.maxEntries-t;e++){const t=this.cache.keys().next().value;this.cache.delete(t)}this.maxEntries=t}}function h(t,e){if(Array.isArray(t)){let s=[];for(let i=0;i<e;i++)s=s.concat(t);return s}{const s=new Array(e);return s.fill(t),s}}function c(t,e){if(!t)throw new l(e)}function p(t,e){let s=0;for(const i of t)i===e&&s++;return s}function d(t){return 1===t.length?t[0]:t}function g(t){return Array.isArray(t)?t:[t]}function f(t){const e=t.replace(/(.)([A-Z][a-z0-9]+)/g,"$1_$2").replace(/([a-z])([A-Z])/g,"$1_$2").toLowerCase();return"_"!==e[0]?e:"private"+e}function m(t){return t.length<=1||-1===t.indexOf("_")?t:t.replace(/[_]+(\w|$)/g,((t,e)=>e.toUpperCase()))}let y={};function b(t){if(null==t)return null;const e={};return e.className=t.getClassName(),e.config=t.getConfig(),e}function w(t){if(null!=t&&"object"==typeof t)if(Array.isArray(t))t.forEach((t=>w(t)));else{const e=Object.keys(t);for(const s of e){const e=t[s];null!=e&&"object"==typeof e&&(Array.isArray(e)||"ndarray"!==e.type||"number"!=typeof e.value?w(e):t[s]=e.value)}}}function z(t,e={},s={},i="object",n=!1){if("string"==typeof t){const n=t;let r;if(n in s)r=s[n];else if(n in y)r=y[n];else if(r=e[n],null==r)throw new a(`Unknown ${i}: ${t}. This may be due to one of the following reasons:\n1. The ${i} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code.\n2. The custom ${i} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);return r}{const r=t;if(null==r.className||null==r.config)throw new a(`${i}: Improper config format: ${JSON.stringify(r)}.\n'className' and 'config' must be set.`);const o=r.className;let l,u;if(o in s?[l,u]=s[o]:o in y?[l,u]=y[o]:o in e&&([l,u]=e[o]),null==l)throw new a(`Unknown ${i}: ${o}. This may be due to one of the following reasons:\n1. The ${i} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code.\n2. 
The custom ${i} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(null!=u){const t={};for(const e of Object.keys(y))t[e]=y[e];for(const e of Object.keys(s))t[e]=s[e];r.config.customObjects=t;const e=Object.assign({},y);for(const t of Object.keys(s))y[t]=s[t];w(r.config);const i=u(l,r.config,s,n);return y=Object.assign({},e),i}{const t=Object.assign({},y);for(const t of Object.keys(s))y[t]=s[t];const e=new l(r.config);return y=Object.assign({},t),e}}}function k(t,e){return-1*function(t,e){return t<e?-1:t>e?1:0}(t,e)}function S(t){if(null==t)return t;const e=[];for(const s of t)-1===e.indexOf(s)&&e.push(s);return e}function v(t){if(null==t)throw new a(`Invalid value in obj: ${JSON.stringify(t)}`);for(const e in t)if(t.hasOwnProperty(e))return!1;return!0}function N(t,e,s){if(null!=s&&t.indexOf(s)<0)throw new a(`${s} is not a valid ${e}. Valid values are ${t} or null/undefined.`)}function C(t,e,s=0,i=1/0){return c(s>=0),c(i>=s),Array.isArray(t)&&t.length>=s&&t.length<=i&&t.every((t=>typeof t===e))}function A(t,s){Array.isArray(t)?(e.util.assert(t.length>0,(()=>`${s} is unexpectedly an empty array.`)),t.forEach(((t,e)=>A(t,`element ${e+1} of ${s}`)))):e.util.assert(Number.isInteger(t)&&t>0,(()=>`Expected ${s} to be a positive integer, but got ${x(t)}.`))}function x(t){return null===t?"null":Array.isArray(t)?"["+t.map((t=>x(t))).join(",")+"]":"string"==typeof t?`"${t}"`:`${t}`}function I(t){return"relu"===t?"relu":"linear"===t?"linear":"elu"===t?"elu":null}let L=0;function T(){return L++}const E={};function D(t=""){return t in E||(E[t]=0),E[t]+=1,t+E[t].toString()}const $=["channelsFirst","channelsLast"],F=["nearest","bilinear"],O=["valid","same","causal"],R=["max","avg"],_=["sum","mul","concat","ave"],M=new Map;function B(t){N($,"DataFormat",t)}function W(t){N(O,"PaddingMode",t)}function U(t){N(R,"PoolMode",t)}const P=[];function j(t,e){P.push(t);try{const t=e();return P.pop(),t}catch(t){throw P.pop(),t}}function V(t){if(!H(t))throw new Error("Not a valid tensor name: '"+t+"'");return(0===P.length?"":P.join("/")+"/")+t}function q(t){if(!H(t))throw new Error("Not a valid tensor name: '"+t+"'");M.has(t)||M.set(t,0);const e=M.get(t);if(M.set(t,M.get(t)+1),e>0){const s=`${t}_${e}`;return M.set(s,1),s}return t}const J=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function H(t){return!!t.match(J)}function Z(t,e,s){null==e&&(e=0),null==s&&(s=t.length);let i=1;for(let n=e;n<s;++n)i*=t[n];return i}function K(t){if(0===t.length)return Number.NaN;let e=Number.POSITIVE_INFINITY;for(let s=0;s<t.length;s++){const i=t[s];i<e&&(e=i)}return e}function G(t){if(0===t.length)return Number.NaN;let e=Number.NEGATIVE_INFINITY;for(let s=0;s<t.length;s++){const i=t[s];i>e&&(e=i)}return e}function Y(t,e){if(e<t)throw new a(`end (${e}) < begin (${t}) is forbidden.`);const s=[];for(let i=t;i<e;++i)s.push(i);return s}let X;function Q(){return null==X&&(X=e.backend().epsilon()),X}function tt(t,e){return i.cast(t,e)}function et(t,e=-1){const s=t.shape.slice();return e<0&&(e=s.length+e+1),s.splice(e,0,1),i.reshape(t,s)}function st(t,s,n){return e.tidy((()=>{switch(t.rank){case 1:return i.slice1d(t,s,n);case 2:return i.slice2d(t,[s,0],[n,t.shape[1]]);case 3:return i.slice3d(t,[s,0,0],[n,t.shape[1],t.shape[2]]);case 4:return i.slice4d(t,[s,0,0,0],[n,t.shape[1],t.shape[2],t.shape[3]]);case 5:return i.slice(t,[s,0,0,0,0],[n,t.shape[1],t.shape[2],t.shape[3],t.shape[4]]);case 6:return i.slice(t,[s,0,0,0,0,0],[n,t.shape[1],t.shape[2],t.shape[3],t.shape[4],t.shape[5]]);default:throw 
new a(`sliceAlongFirstAxis() received an unsupported tensor rank: ${t.rank}`)}}))}function it(t,s,n){return e.tidy((()=>{switch(t.rank){case 1:return i.slice1d(t,s,n);case 2:return i.slice2d(t,[0,s],[t.shape[0],n]);case 3:return i.slice3d(t,[0,0,s],[t.shape[0],t.shape[1],n]);case 4:return i.slice4d(t,[0,0,0,s],[t.shape[0],t.shape[1],t.shape[2],n]);default:throw new a(`sliceAlongLastAxis() received an unsupported tensor rank: ${t.rank}`)}}))}function nt(t,s,n,r){return e.tidy((()=>{switch(t.rank){case 1:return i.slice1d(t,s,n);case 2:switch(r){case 1:return st(t,s,n);case 2:return it(t,s,n);default:throw new a(`The axis is not within the rank of the tensor ${r}`)}case 3:switch(r){case 1:return st(t,s,n);case 2:return i.slice3d(t,[0,s,0],[t.shape[0],n,t.shape[2]]);case 3:return it(t,s,n);default:throw new a(`The axis is not within the rank of the tensor ${r}`)}case 4:switch(r){case 1:return st(t,s,n);case 2:return i.slice4d(t,[0,s,0,0],[t.shape[0],n,t.shape[2],t.shape[3]]);case 3:return i.slice4d(t,[0,0,s,0],[t.shape[0],t.shape[1],n,t.shape[3]]);case 4:return it(t,s,n);default:throw new a(`The axis is not within the rank of the tensor ${r}`)}default:throw new a(`sliceAlongLastAxis() received an unsupported tensor rank: ${t.rank}`)}}))}function rt(t,e=-1){let s;return e<0&&(s=t[0].rank,e=0!==s?s:0),e===t[0].rank&&(e=-1),i.concat(t,e)}function at(t,e){switch(t.rank){case 1:return i.concat1d([t,e]);case 2:return i.concat2d([t,e],0);case 3:return i.concat3d([t,e],0);case 4:return i.concat4d([t,e],0);default:throw new a(`concatAlongFirstAxis() received an unsupported tensor rank: ${t.rank}`)}}function ot(t,e){if(Array.isArray(e)||(e=[e]),t.rank!==e.length)throw new a(`The length of input n (${e.length}) does not match the number of dimensions in input x (${t.rank})`);return i.tile(t,e)}function lt(t,e=0,s=1,n,r){return i.randomNormal(t,e,s,n,r)}function ut(t,e,s,n){if(t.rank<2||e.rank<2)throw new o(`dot requires both inputs to be rank >= 2 but got x shape = ${t.shape} and y shape = ${e.shape}`);if(e.rank>=3){if(t.shape.slice(-1)[0]!==e.shape.slice(-2)[0])throw new o(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${t.shape} and y shape = ${e.shape}`)}if(2===t.rank&&2===e.rank){const r=!1,a=!1;return i.fused.matMul({a:t,b:e,transposeA:r,transposeB:a,bias:n?pt(t.rank,n,"channelsLast"):null,activation:s})}{const r=t.shape.slice(),a=r.pop();t=i.reshape(t,[-1,a]);const o=e.shape.slice(),l=o.pop(),u=o.pop(),h=[...o,l],c=Array.from({length:e.rank},((t,s)=>0===s?e.rank-2:s<=e.rank-2?s-1:s));e=i.reshape(i.transpose(e,c),[u,-1]);const p=[...r,...h],d=!1,g=!1;return i.reshape(i.fused.matMul({a:t,b:e,transposeA:d,transposeB:g,bias:n?pt(t.rank,n,"channelsLast"):null,activation:s}),p)}}function ht(t,s,n){return e.tidy((()=>(s=Array.isArray(s)?e.tensor1d(s,"int32"):i.cast(s,"int32"),i.gather(t,s,n))))}function ct(t){return i.mul(t,t)}function pt(t,e,s){const n=e.shape;if(1!==e.rank&&e.rank!==t)throw new a(`Unexpected bias dimensions: ${e.rank}; expected it to be 1 or ${t}`);if(5===t){if("channelsFirst"===s)return 1===n.length?i.reshape(e,[1,n[0],1,1,1]):i.reshape(e,[1,n[3],n[0],n[1],n[2]]);if("channelsLast"===s)return 1===n.length?i.reshape(e,[1,1,1,1,n[0]]):i.reshape(e,[1].concat(n))}else if(4===t){if("channelsFirst"===s)return 1===n.length?i.reshape(e,[1,n[0],1,1]):i.reshape(e,[1,n[2],n[0],n[1]]);if("channelsLast"===s)return 1===n.length?i.reshape(e,[1,1,1,n[0]]):i.reshape(e,[1].concat(n))}else if(3===t){if("channelsFirst"===s)return 
1===n.length?i.reshape(e,[1,n[0],1]):i.reshape(e,[1,n[1],n[0]]);if("channelsLast"===s)return 1===n.length?i.reshape(e,[1,1,n[0]]):i.reshape(e,[1].concat(n))}else if(t<3)return e;throw new a(`Unsupported input rank by biasAdd: ${e.rank}`)}function dt(t,s,n){return e.tidy((()=>(null==n&&(n="channelsLast"),B(n),i.add(t,pt(t.rank,s,n)))))}function gt(t,s,n,r){return e.tidy((()=>i.dropout(t,s,n,r)))}function ft(t,e,s=!1){return s?t():e()}const mt=["fanIn","fanOut","fanAvg"],yt=["normal","uniform","truncatedNormal"];class bt extends e.serialization.Serializable{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}}class wt extends bt{apply(t,s){return e.zeros(t,s)}}wt.className="Zeros",e.serialization.registerClass(wt);class zt extends bt{apply(t,s){return e.ones(t,s)}}zt.className="Ones",e.serialization.registerClass(zt);class kt extends bt{constructor(t){if(super(),"object"!=typeof t)throw new a(`Expected argument of type ConstantConfig but got ${t}`);if(void 0===t.value)throw new a(`config must have value set but got ${t}`);this.value=t.value}apply(t,s){return e.tidy((()=>e.mul(e.scalar(this.value),e.ones(t,s))))}getConfig(){return{value:this.value}}}kt.className="Constant",e.serialization.registerClass(kt);class St extends bt{constructor(t){super(),this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=t.minval||this.DEFAULT_MINVAL,this.maxval=t.maxval||this.DEFAULT_MAXVAL,this.seed=t.seed}apply(t,s){return e.randomUniform(t,this.minval,this.maxval,s)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}}St.className="RandomUniform",e.serialization.registerClass(St);class vt extends bt{constructor(t){super(),this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=t.mean||this.DEFAULT_MEAN,this.stddev=t.stddev||this.DEFAULT_STDDEV,this.seed=t.seed}apply(t,e){if("float32"!==(e=e||"float32")&&"int32"!==e)throw new o(`randomNormal does not support dType ${e}.`);return lt(t,this.mean,this.stddev,e,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}vt.className="RandomNormal",e.serialization.registerClass(vt);class Nt extends bt{constructor(t){super(),this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=t.mean||this.DEFAULT_MEAN,this.stddev=t.stddev||this.DEFAULT_STDDEV,this.seed=t.seed}apply(t,s){if("float32"!==(s=s||"float32")&&"int32"!==s)throw new o(`truncatedNormal does not support dType ${s}.`);return e.truncatedNormal(t,this.mean,this.stddev,s,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Nt.className="TruncatedNormal",e.serialization.registerClass(Nt);class Ct extends bt{constructor(t){super(),this.gain=null!=t.gain?t.gain:1}apply(t,s){return e.tidy((()=>{if(2!==t.length||t[0]!==t[1])throw new a("Identity matrix initializer can only be used for 2D square matrices.");return e.mul(this.gain,e.eye(t[0]))}))}getConfig(){return{gain:this.gain}}}Ct.className="Identity",e.serialization.registerClass(Ct);class At extends bt{constructor(t){if(super(),t.scale<0)throw new a(`scale must be a positive float. 
Got: ${t.scale}`);var e;this.scale=null==t.scale?1:t.scale,this.mode=null==t.mode?"fanIn":t.mode,e=this.mode,N(mt,"FanMode",e),this.distribution=null==t.distribution?"normal":t.distribution,function(t){N(yt,"Distribution",t)}(this.distribution),this.seed=t.seed}apply(t,s){const i=function(t,e="channelsLast"){let s,i;if(B(e),2===t.length)s=t[0],i=t[1];else if(-1!==[3,4,5].indexOf(t.length)){if("channelsFirst"===e){const e=Z(t,2);s=t[1]*e,i=t[0]*e}else if("channelsLast"===e){const e=Z(t,0,t.length-2);s=t[t.length-2]*e,i=t[t.length-1]*e}}else{const e=Z(t);s=Math.sqrt(e),i=Math.sqrt(e)}return[s,i]}(t),n=i[0],r=i[1];let a=this.scale;if("fanIn"===this.mode?a/=Math.max(1,n):"fanOut"===this.mode?a/=Math.max(1,r):a/=Math.max(1,(n+r)/2),"normal"===this.distribution){const i=Math.sqrt(a);if("float32"!==(s=s||"float32")&&"int32"!==s)throw new o(`${this.getClassName()} does not support dType ${s}.`);return e.truncatedNormal(t,0,i,s,this.seed)}{const i=Math.sqrt(3*a);return e.randomUniform(t,-i,i,s)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}}At.className="VarianceScaling",e.serialization.registerClass(At);class xt extends At{constructor(t){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:null==t?null:t.seed})}getClassName(){return At.className}}xt.className="GlorotUniform",e.serialization.registerClass(xt);class It extends At{constructor(t){super({scale:1,mode:"fanAvg",distribution:"normal",seed:null==t?null:t.seed})}getClassName(){return At.className}}It.className="GlorotNormal",e.serialization.registerClass(It);class Lt extends At{constructor(t){super({scale:2,mode:"fanIn",distribution:"normal",seed:null==t?null:t.seed})}getClassName(){return At.className}}Lt.className="HeNormal",e.serialization.registerClass(Lt);class Tt extends At{constructor(t){super({scale:2,mode:"fanIn",distribution:"uniform",seed:null==t?null:t.seed})}getClassName(){return At.className}}Tt.className="HeUniform",e.serialization.registerClass(Tt);class Et extends At{constructor(t){super({scale:1,mode:"fanIn",distribution:"normal",seed:null==t?null:t.seed})}getClassName(){return At.className}}Et.className="LeCunNormal",e.serialization.registerClass(Et);class Dt extends At{constructor(t){super({scale:1,mode:"fanIn",distribution:"uniform",seed:null==t?null:t.seed})}getClassName(){return At.className}}Dt.className="LeCunUniform",e.serialization.registerClass(Dt);class $t extends bt{constructor(t){if(super(),this.DEFAULT_GAIN=1,this.gain=null==t.gain?this.DEFAULT_GAIN:t.gain,this.seed=t.seed,null!=this.seed)throw new o("Random seed is not implemented for Orthogonal Initializer yet.")}apply(t,s){return e.tidy((()=>{if(t.length<2)throw new o("Shape must be at least 2D.");t[0]*t[1]>2e3&&console.warn(`Orthogonal initializer is being called on a matrix with more than 2000 (${t[0]*t[1]}) elements: Slowness may result.`);const s=lt(t[0]>t[1]?[t[1],t[0]]:t,0,1,"float32");let i=e.linalg.gramSchmidt(s);return t[0]>t[1]&&(i=e.transpose(i)),e.mul(this.gain,i)}))}getConfig(){return{gain:this.gain,seed:this.seed}}}$t.className="Orthogonal",e.serialization.registerClass($t);const Ft={constant:"Constant",glorotNormal:"GlorotNormal",glorotUniform:"GlorotUniform",heNormal:"HeNormal",heUniform:"HeUniform",identity:"Identity",leCunNormal:"LeCunNormal",leCunUniform:"LeCunUniform",ones:"Ones",orthogonal:"Orthogonal",randomNormal:"RandomNormal",randomUniform:"RandomUniform",truncatedNormal:"TruncatedNormal",varianceScaling:"VarianceScaling",zeros:"Zeros"};function Ot(t,s={}){return 
z(t,e.serialization.SerializationMap.getMap().classNameMap,s,"initializer")}function Rt(t){return b(t)}function _t(t){if("string"==typeof t){const e=t in Ft?Ft[t]:t;if("GlorotNormal"===e)return new It;if("GlorotUniform"===e)return new xt;if("HeNormal"===e)return new Lt;if("HeUniform"===e)return new Tt;if("LeCunNormal"===e)return new Et;if("LeCunUniform"===e)return new Dt;{const t={};return t.className=e,t.config={},Ot(t)}}return t instanceof bt?t:Ot(t)}function Mt(t){return Array.isArray(t)&&Array.isArray(t[0])}function Bt(t){return 0===t.length?[]:Array.isArray(t[0])?t:[t]}function Wt(t){let e;if(Array.isArray(t)){if(1!==t.length)throw new a(`Expected Tensor length to be 1; got ${t.length}`);e=t[0]}else e=t;return e}function Ut(t){if(Array.isArray(t)&&Array.isArray(t[0])){if(1===t.length)return t[0];throw new a(`Expected exactly 1 Shape; got ${t.length}`)}return t}function Pt(t){let e=0;for(const s of t)0===s.shape.length?e+=1:e+=s.shape.reduce(((t,e)=>t*e));return e}const jt="Variable";class Vt{constructor(t,e="float32",s="Variable",n=!0,r=null){this.dtype=null==e?"float32":e,this.shape=t.shape,this.id=T(),s=null==s?jt:s,this.originalName=V(s),this.name=q(this.originalName),this.trainable_=n,this.constraint=r,this.val=i.variable(t,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(t){return this.assertNotDisposed(),function(t,e){if(t.shape.toString()!==e.shape.toString())throw new Error("Shape mismatch: "+JSON.stringify(t.shape)+" vs. "+JSON.stringify(e.shape))}(this.val,t),this.val.id!==t.id&&(this.val.assign(t),null!=this.constraint&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw new Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(t){this.trainable_=t,this.val.trainable=t}}function qt(t){return t.map((t=>t.read()))}function Jt(t){t.forEach((t=>{t[0].write(t[1])}))}class Ht{constructor(t){this.dtype=t.dtype,this.shape=t.shape,null!=t.shape?this.ndim=t.shape.length:this.ndim=t.ndim,this.maxNDim=t.maxNDim,this.minNDim=t.minNDim,this.axes=t.axes||{}}}class Zt{constructor(t,e,s,i,n,r,a){this.dtype=t,this.shape=e,this.sourceLayer=s,this.inputs=i,this.callArgs=n,this.outputTensorIndex=a,this.id=T(),null!=r&&(this.originalName=V(r),this.name=q(this.originalName)),this.rank=e.length}}let Kt=0;class Gt{constructor(t,e){this.callArgs=e,this.id=Kt++,this.outboundLayer=t.outboundLayer,this.inboundLayers=t.inboundLayers,this.nodeIndices=t.nodeIndices,this.tensorIndices=t.tensorIndices,this.inputTensors=t.inputTensors,this.outputTensors=t.outputTensors,this.inputMasks=t.inputMasks,this.outputMasks=t.outputMasks,this.inputShapes=t.inputShapes,this.outputShapes=t.outputShapes;for(const e of t.inboundLayers)null!=e&&e.outboundNodes.push(this);t.outboundLayer.inboundNodes.push(this)}getConfig(){const t=[];for(const e of this.inboundLayers)null!=e?t.push(e.name):t.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:t,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}}let Yt=0;class Xt extends 
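// The classes above (Zeros, Ones, Constant, RandomUniform, RandomNormal,
// TruncatedNormal, Identity, VarianceScaling, the Glorot/He/LeCun variants
// and Orthogonal) implement tf.initializers, each registered under its
// className for deserialization; `_t` resolves a string name or a config
// object to an instance. Illustrative sketch, assuming the public
// tf.initializers factory names exposed by this bundle:
//
//   const init = tf.initializers.glorotUniform({seed: 42});
//   const kernel = init.apply([3, 4], 'float32');  // samples U(-limit, limit)
//   // Or by string name when configuring a layer:
//   //   tf.layers.dense({units: 4, kernelInitializer: 'heNormal'});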
e.serialization.Serializable{constructor(t={}){super(),this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=Yt++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let e=t.name;if(!e){const t=this.getClassName();e=f(t)+"_"+D(t)}if(this.name=e,this.trainable_=null==t.trainable||t.trainable,null!=t.inputShape||null!=t.batchInputShape){let e;if(null!=t.batchInputShape)e=t.batchInputShape;else if(null!=t.inputShape){let s=null;null!=t.batchSize&&(s=t.batchSize),e=[s].concat(t.inputShape)}this.batchInputShape=e;let s=t.dtype;null==s&&(s=t.inputDType),null==s&&(s="float32"),this.dtype=s}null!=t.weights?this.initialWeights=t.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(t,e){return t.name+"_ib-"+e.toString()}getNodeAtIndex(t,e){if(0===this.inboundNodes.length)throw new r(`The layer has never been called and thus has no defined ${e}.`);if(this.inboundNodes.length<=t)throw new a(`Asked to get ${e} at node ${t}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[t]}getInputAt(t){return d(this.getNodeAtIndex(t,"input").inputTensors)}getOutputAt(t){return d(this.getNodeAtIndex(t,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new n(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. Use \`getInputAt(nodeIndex)\` instead.`);if(0===this.inboundNodes.length)throw new n(`Layer ${this.name} is not connected, no input to return.`);return d(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(0===this.inboundNodes.length)throw new n(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new n(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use \`getOutputAt(nodeIndex)\` instead.`);return d(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map((t=>t()))}get updates(){return this._updates}get built(){return this._built}set built(t){this._built=t}get trainable(){return this.trainable_}set trainable(t){this._trainableWeights.forEach((e=>e.trainable=t)),this.trainable_=t}get trainableWeights(){return this.trainable_?this._trainableWeights.filter((t=>t.trainable)):[]}set trainableWeights(t){this._trainableWeights=t}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter((t=>!t.trainable)).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(t){this._nonTrainableWeights=t}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw new Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(t){if(t=g(t),null==this.inputSpec||0===this.inputSpec.length)return;const e=g(this.inputSpec);if(t.length!==e.length)throw new a(`Layer ${this.name} expects ${e.length} inputs, but it received ${t.length} input tensors. 
Input received: ${t}`);for(let s=0;s<t.length;s++){const i=t[s],n=e[s];if(null==n)continue;const r=i.rank;if(null!=n.ndim&&r!==n.ndim)throw new a(`Input ${s} is incompatible with layer ${this.name}: expected ndim=${n.ndim}, found ndim=${r}`);if(null!=n.maxNDim&&r>n.maxNDim)throw new a(`Input ${s} is incompatible with layer ${this.name}: expected max_ndim=${n.maxNDim}, found ndim=${r}`);if(null!=n.minNDim&&r<n.minNDim)throw new a(`Input ${s} is incompatible with layer ${this.name}: expected min_ndim=${n.minNDim}, found ndim=${r}.`);if(null!=n.dtype&&i.dtype!==n.dtype)throw new a(`Input ${s} is incompatible with layer ${this.name} : expected dtype=${n.dtype}, found dtype=${i.dtype}.`);if(n.axes){const t=i.shape;for(const e in n.axes){const i=Number(e),r=n.axes[e],o=i>=0?t[i]:t[t.length+i];if(null!=r&&-1===[r,null].indexOf(o))throw new a(`Input ${s} is incompatible with layer ${this.name}: expected axis ${i} of input shape to have value ${r} but got shape ${t}.`)}}if(null!=n.shape)for(let t=0;t<n.shape.length;++t){const e=n.shape[t],r=i.shape[t];if(null!=e&&null!=r&&e!==r)throw new a(`Input ${s} is incompatible with layer ${this.name}: expected shape=${n.shape}, found shape=${i.shape}.`)}}}call(t,e){return t}invokeCallHook(t,e){null!=this._callHook&&this._callHook(t,e)}setCallHook(t){this._callHook=t}clearCallHook(){this._callHook=null}apply(t,e){e=e||{},this.assertNotDisposed();const s=g(t);let i=!0;for(const t of s)if(!(t instanceof Zt)){i=!1;break}let n=!0;for(const t of s)if(t instanceof Zt){n=!1;break}if(i===n)throw new a("Arguments to apply() must be all SymbolicTensors or all Tensors");return j(this.name,(()=>{if(!this.built){this.assertInputCompatibility(t);const e=[];for(const s of g(t))e.push(s.shape);this.build(d(e)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),null===this._refCount&&n&&(this._refCount=1)}if(this.assertInputCompatibility(t),n){let i=this.call(t,e);const n=g(i),r=[];for(let t of n)-1!==s.indexOf(t)&&(t=t.clone()),r.push(t);if(i=d(r),null!=this.activityRegularizer)throw new o("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return i}{const s=function(t){t=g(t);const e=[];for(const s of t)e.push(s.shape);return d(e)}(t),i=this.computeOutputShape(s);let n;const r="float32";if(this.warnOnIncompatibleInputShape(Array.isArray(t)?s[0]:s),n=null!=i&&i.length>0&&Array.isArray(i[0])?i.map(((s,i)=>new Zt(r,s,this,g(t),e,this.name,i))):new Zt(r,i,this,g(t),e,this.name),this.addInboundNode(t,n,null,null,s,i,e),this._refCount++,null!=this.activityRegularizer)throw new o("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return n}}))}warnOnIncompatibleInputShape(t){if(null!=this.batchInputShape)if(t.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(t)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let e=!1;this.batchInputShape.forEach(((s,i)=>{null!=s&&null!=t[i]&&t[i]!==s&&(e=!0)})),e&&console.warn(`The shape of the input tensor (${JSON.stringify(t)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(null==this.inboundNodes||0===this.inboundNodes.length)throw new n(`The layer ${this.name} has never been called and thus has no defined output shape.`);const t=[];for(const e of this.inboundNodes){const s=JSON.stringify(e.outputShapes);-1===t.indexOf(s)&&t.push(s)}if(1===t.length){const 
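// `Xt` above is the base Layer class. Its apply() accepts either all
// SymbolicTensors (graph construction) or all concrete Tensors (eager
// execution) and throws when the two are mixed. Sketch, assuming the public
// layer factories:
//
//   const dense = tf.layers.dense({units: 4});
//   const sym = dense.apply(tf.input({shape: [8]}));  // SymbolicTensor path
//   const out = dense.apply(tf.ones([2, 8]));         // Tensor path, shape [2, 4]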
t=this.inboundNodes[0].outputShapes;return Array.isArray(t)&&Array.isArray(t[0])&&1===t.length?t[0]:t}throw new n(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new r(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return Pt(this.weights)}build(t){this.built=!0}getWeights(t=!1){return qt(t?this.trainableWeights:this.weights)}setWeights(t){e.tidy((()=>{const s=this.weights;if(s.length!==t.length)throw new a(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${t.length}, but the layer was expecting ${s.length} weights. Provided weights: ${t}...`);if(0===s.length)return;const i=[],n=qt(s);for(let r=0;r<n.length;++r){const o=n[r],l=s[r],u=t[r];if(!e.util.arraysEqual(o.shape,u.shape))throw new a(`Layer weight shape ${o.shape} not compatible with provided weight shape ${u.shape}`);i.push([l,u])}Jt(i)}))}addWeight(t,e,s,i,n,r,o,l){if(-1!==this._addedWeightNames.indexOf(t))throw new a(`Duplicate weight name ${t} for layer ${this.name}`);this._addedWeightNames.push(t),null==s&&(s="float32"),this.fastWeightInitDuringBuild&&(i=null!=l?l():_t("zeros"));const u=i.apply(e,s),h=new Vt(u,s,t,r,o);return u.dispose(),null!=n&&this.addLoss((()=>n.apply(h.read()))),null==r&&(r=!0),r?this._trainableWeights.push(h):this._nonTrainableWeights.push(h),h}setFastWeightInitDuringBuild(t){this.fastWeightInitDuringBuild=t}addLoss(t){null==t||Array.isArray(t)&&0===t.length||(t=g(t),void 0!==this._losses&&null!==this._losses&&this.losses.push(...t))}computeOutputShape(t){return t}computeMask(t,e){if(!this.supportsMasking){if(null!=e){if(!Array.isArray(e))throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);e.forEach((t=>{if(null!=t)throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)}))}return null}return e}addInboundNode(t,e,s,i,n,r,a=null){const o=g(t);e=g(e),s=g(s),i=g(i),n=Bt(n),r=Bt(r);const l=[],u=[],h=[];for(const t of o)l.push(t.sourceLayer),u.push(t.nodeIndex),h.push(t.tensorIndex);new Gt({outboundLayer:this,inboundLayers:l,nodeIndices:u,tensorIndices:h,inputTensors:o,outputTensors:e,inputMasks:s,outputMasks:i,inputShapes:n,outputShapes:r},a);for(let t=0;t<e.length;t++)e[t].sourceLayer=this,e[t].nodeIndex=this.inboundNodes.length-1,e[t].tensorIndex=t}getConfig(){const t={name:this.name,trainable:this.trainable};return null!=this.batchInputShape&&(t.batchInputShape=this.batchInputShape),null!=this.dtype&&(t.dtype=this.dtype),t}disposeWeights(){return this.weights.forEach((t=>t.dispose())),this.weights.length}assertNotDisposed(){if(0===this._refCount)throw new Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw new Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(null===this._refCount)throw new Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let t=0;return 0==--this._refCount&&(t=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:t}}}function Qt(t,e,s){if((null==e||null!=s&&s>0)&&(e=t.sourceLayer,s=t.nodeIndex),0===e.inboundNodes.length)return[t];{const t=e.inboundNodes[s];if(0===t.inboundLayers.length)return t.inputTensors;{const e=[];for(let s=0;s<t.inboundLayers.length;s++){const 
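// setWeights()/addWeight() above enforce unique weight names and matching
// shapes before values are written. Round-trip sketch on a built layer or
// model (illustrative):
//
//   const ws = layer.getWeights();                   // e.g. [kernel, bias]
//   layer.setWeights(ws.map(w => tf.mul(w, 0.5)));   // shapes must match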
i=Qt(t.inputTensors[s],t.inboundLayers[s],t.nodeIndices[s]);for(const t of i)-1===e.indexOf(t)&&e.push(t)}return e}}}class te extends Xt{constructor(t){if(super({dtype:t.dtype,name:null!=t.name?t.name:D("input").toString()}),null==t.batchSize&&(t.batchSize=null),null==t.sparse&&(t.sparse=!1),this.trainable=!1,this.built=!0,this.sparse=t.sparse,null!=t.inputShape&&null!=t.batchInputShape)throw new a("Only provide the inputShape OR batchInputShape argument to inputLayer, not both at the same time.");let e=t.batchInputShape;if(null==e){if(null==t.inputShape)throw new a("An InputLayer should be passed either a `batchInputShape` or an `inputShape`.");e=[t.batchSize].concat(t.inputShape)}else if(null!=t.batchSize)throw new a("Cannot specify batchSize if batchInputShape is specified when creating an InputLayer.");const s=t.dtype||"float32";this.batchInputShape=e,this.dtype=s,this.inputSpec=[{shape:e}];const i=new Zt(this.dtype,this.batchInputShape,this,[],{},this.name);i.nodeIndex=0,i.tensorIndex=0,new Gt({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:[i],outputTensors:[i],inputMasks:[null],outputMasks:[null],inputShapes:[e],outputShapes:[e]})}apply(t,e){throw new a(`Cannot pass any input to an InputLayer's apply() method. InputLayer name: ${this.name}`)}dispose(){return{refCountAfterDispose:this._refCount,numDisposedVariables:0}}getConfig(){return{batchInputShape:this.batchInputShape,dtype:this.dtype,sparse:this.sparse,name:this.name}}}function ee(t){if(null==t.batchShape&&null==t.shape)throw new Error("Please provide to Input either a `shape` or a `batchShape` argument. Note that `shape` does not include the batch dimension.");if(null!=t.batchShape&&null!=t.shape)throw new a("Please provide either a `shape` or `batchShape` argument to Input, but not both.");let e=t.batchShape;null!=t.shape&&null==e&&(e=[null].concat(t.shape));let s=t.dtype;null==s&&(s="float32");return new te({batchInputShape:e,name:t.name,dtype:s,sparse:t.sparse}).inboundNodes[0].outputTensors[0]}te.className="InputLayer",e.serialization.registerClass(te);class se{constructor(t){if(this.id2Value={},this.id2Mask={},this.name2Id={},t instanceof se)for(const e in t.id2Value)this.id2Value[e]=t.id2Value[e],e in t.id2Mask&&(this.id2Mask[e]=t.id2Mask[e]);else{if(null==t)return;for(const e of t)this.add(e.key,e.value)}}add(t,s,i){if(null!=this.id2Value[t.id])throw new a(`Duplicate key: name=${t.name}, id=${t.id}`);return this.id2Value[t.id]=function(t,s){if(null==t.dtype||t.dtype===s.dtype)return s;try{return e.cast(s,t.dtype)}catch(e){throw new a(`The dtype of the feed (${s.dtype}) can not be cast to the dtype of the key '${t.name}' (${t.dtype}).`)}}(t,s),this.name2Id[t.name]=t.id,null!=i&&(this.id2Mask[t.id]=i),this}addFeed(t){this.add(t.key,t.value)}hasKey(t){return null!=this.id2Value[t.id]}names(){return Object.keys(this.name2Id)}getValue(t){if(t instanceof Zt){if(null==this.id2Value[t.id])throw new a(`Nonexistent key: ${t.name}`);return this.id2Value[t.id]}{const e=this.name2Id[t];if(null==e)throw new a(`Feed dict has no SymbolicTensor name: ${t}`);return this.id2Value[e]}}getMask(t){if(t instanceof Zt){if(null==this.id2Value[t.id])throw new a(`Nonexistent key: ${t.name}`);return this.id2Mask[t.id]}{const e=this.name2Id[t];if(null==e)throw new a(`Feed dict has no SymbolicTensor name: ${t}`);return this.id2Mask[e]}}disposeMasks(){null!=this.id2Mask&&e.dispose(this.id2Mask)}}const ie=new u,ne=new u;function re(t,s,i,n){const 
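// `te` (InputLayer) and the Input() factory above create placeholder
// SymbolicTensors rather than data; the batch dimension is prepended as null.
// Sketch:
//
//   const x = tf.input({shape: [32]});  // SymbolicTensor with shape [null, 32]
//   // Passing both `shape` and `batchShape` is rejected with an error.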
r=null!=i&&i.training,a=Array.isArray(t),o=a?t:[t],l=o.map((t=>t.name)),u=[],h=s.names();for(const t of l)-1!==h.indexOf(t)?u.push(s.getValue(t)):u.push(null);null!=n&&(n.maxNumTensors=-1/0,n.minNumTensors=1/0);const c=l.join(",")+"|"+s.names().sort().join(",");let p,d=ie.get(c);if(null==d){const t=function(t,s){e.util.assert(null!=t&&t.length>0,(()=>"Expected at least one fetch, got none"));let i=[],n={};if(1===t.length){const e=oe(t[0],s);i=e.sorted,n=e.recipientMap}else{const e=new Set;for(const r of t){const{sorted:t,recipientMap:a}=oe(r,s);for(const s of t)e.has(s.name)||(i.push(s),e.add(s.name));for(const t in a)null==n[t]&&(n[t]=new Set),a[t].forEach((e=>n[t].add(e)))}}return{sorted:i,recipientCounts:ae(n)}}(o,s);d=t.sorted,p=t.recipientCounts,ie.put(c,d),ne.put(c,p)}p={},r||Object.assign(p,ne.get(c));const f=new se(s);for(let t=0;t<d.length;++t){if(null!=n){const t=e.memory().numTensors;t>n.maxNumTensors&&(n.maxNumTensors=t),t<n.minNumTensors&&(n.minNumTensors=t)}const a=d[t],o=a.sourceLayer;if(o instanceof te)continue;const h=[],c=[],m=[];let y=!1;for(const t of a.inputs){const e=f.getValue(t),i=f.getMask(t);h.push(e),c.push(i),null!=i&&(y=!0),r||(p[t.name]--,0!==p[t.name]||s.hasKey(t)||-1!==l.indexOf(t.name)||e.isDisposed||!0===t.sourceLayer.stateful||m.push(e))}y&&((i=i||{}).mask=c[0]);const b=g(o.apply(h,i));let w=null;o.supportsMasking&&(w=o.computeMask(h,c));const z=le(a),k=Array.isArray(z)?z:[z];for(let t=0;t<k.length;++t){f.hasKey(k[t])||f.add(k[t],b[t],Array.isArray(w)?w[0]:w);const e=l.indexOf(k[t].name);-1!==e&&(u[e]=b[t])}r||e.dispose(m)}return f.disposeMasks(),a?u:u[0]}function ae(t){const e={};for(const s in t)e[s]=t[s].size;return e}function oe(t,e){const s=new Set,i=[],n={};for(const t of e.names())s.add(t);const r=[],a=[];for(r.push(t);r.length>0;){const t=r[r.length-1];if(s.has(t.name)){r.pop();continue}const e=a[a.length-1]===r.length-1;if(0===t.inputs.length||e)r.pop(),i.push(t),s.add(t.name),e&&a.pop();else{a.push(r.length-1);for(const e of t.inputs)null==n[e.name]&&(n[e.name]=new Set),n[e.name].add(t.name),s.has(e.name)||r.push(e)}}return{sorted:i,recipientMap:n}}function le(t){let e;if(1===t.sourceLayer.inboundNodes.length)e=t.sourceLayer.output;else{let s=null;for(let e=0;e<t.sourceLayer.inboundNodes.length;++e)for(const i of t.sourceLayer.inboundNodes[e].outputTensors)if(i.id===t.id){s=e;break}e=t.sourceLayer.getOutputAt(s)}return e}function ue(t,s){return e.tidy((()=>i.sqrt(i.sum(i.mul(t,t),s,!0))))}e.env().registerFlag("TOPOLOGICAL_SORT_CACHE_MAX_ENTRIES",(()=>100),(function(t){null!=ie&&ie.setMaxEntries(t),null!=ne&&ne.setMaxEntries(t)}));class he extends e.serialization.Serializable{getConfig(){return{}}}class ce extends he{constructor(t){super(),this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=null!=t.maxValue?t.maxValue:this.defaultMaxValue,this.axis=null!=t.axis?t.axis:this.defaultAxis}apply(t){return e.tidy((()=>{const e=ue(t,this.axis),s=i.clipByValue(e,0,this.maxValue);return i.mul(t,i.div(s,i.add(Q(),e)))}))}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}}ce.className="MaxNorm",e.serialization.registerClass(ce);class pe extends he{constructor(t){super(),this.defaultAxis=0,this.axis=null!=t.axis?t.axis:this.defaultAxis}apply(t){return e.tidy((()=>i.div(t,i.add(Q(),ue(t,this.axis)))))}getConfig(){return{axis:this.axis}}}pe.className="UnitNorm",e.serialization.registerClass(pe);class de extends he{apply(t){return i.relu(t)}}de.className="NonNeg",e.serialization.registerClass(de);class ge extends 
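// The constraint classes around this point (MaxNorm, UnitNorm, NonNeg, and
// MinMaxNorm being declared here) project weights back into a feasible set
// after each update, using the shared L2-norm helper `ue`. Attachment sketch
// via the public API:
//
//   tf.layers.dense({
//     units: 8,
//     kernelConstraint: tf.constraints.maxNorm({maxValue: 2}),  // or 'maxNorm'
//   });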
he{constructor(t){super(),this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=null!=t.minValue?t.minValue:this.defaultMinValue,this.maxValue=null!=t.maxValue?t.maxValue:this.defaultMaxValue,this.rate=null!=t.rate?t.rate:this.defaultRate,this.axis=null!=t.axis?t.axis:this.defaultAxis}apply(t){return e.tidy((()=>{const e=ue(t,this.axis),s=i.add(i.mul(this.rate,i.clipByValue(e,this.minValue,this.maxValue)),i.mul(1-this.rate,e));return i.mul(t,i.div(s,i.add(Q(),e)))}))}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}}ge.className="MinMaxNorm",e.serialization.registerClass(ge);const fe={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function me(t){return b(t)}function ye(t,s={}){return z(t,e.serialization.SerializationMap.getMap().classNameMap,s,"constraint")}function be(t){if(null==t)return null;if("string"==typeof t){return ye({className:t in fe?fe[t]:t,config:{}})}return t instanceof he?t:ye(t)}var we={__proto__:null,maxNorm:function(t){return new ce(t)},unitNorm:function(t){return new pe(t)},nonNeg:function(){return new de},minMaxNorm:function(t){return new ge(t)}};var ze,ke={__proto__:null,zeros:function(){return new wt},ones:function(){return new zt},constant:function(t){return new kt(t)},randomUniform:function(t){return new St(t)},randomNormal:function(t){return new vt(t)},truncatedNormal:function(t){return new Nt(t)},identity:function(t){return new Ct(t)},varianceScaling:function(t){return new At(t)},glorotUniform:function(t){return new xt(t)},glorotNormal:function(t){return new It(t)},heNormal:function(t){return new Lt(t)},heUniform:function(t){return new Tt(t)},leCunNormal:function(t){return new Et(t)},leCunUniform:function(t){return new Dt(t)},orthogonal:function(t){return new $t(t)}};async function Se(t){if(null==t)return;const s=[],i=[],n=[];for(const e in t){const r=t[e];if("number"!=typeof r){const t=r;s.push(t.data()),i.push(e),n.push(t)}}if(s.length>0){const r=await Promise.all(s);for(let e=0;e<r.length;++e)t[i[e]]=r[e][0];e.dispose(n)}}function ve(t){if(null!=t)for(const e in t){const s=t[e];"number"!=typeof s&&s.dispose()}}!function(t){t[t.SILENT=0]="SILENT",t[t.VERBOSE=1]="VERBOSE"}(ze||(ze={}));class Ne{constructor(){this.validationData=null}setParams(t){this.params=t}async onEpochBegin(t,e){}async onEpochEnd(t,e){}async onBatchBegin(t,e){}async onBatchEnd(t,e){}async onTrainBegin(t){}async onTrainEnd(t){}setModel(t){}}class Ce{constructor(t,e=10){null==t&&(t=[]),this.callbacks=t,this.queueLength=e}append(t){this.callbacks.push(t)}setParams(t){for(const e of this.callbacks)e.setParams(t)}setModel(t){for(const e of this.callbacks)e.setModel(t)}async onEpochBegin(t,e){null==e&&(e={});for(const s of this.callbacks)await s.onEpochBegin(t,e)}async onEpochEnd(t,e){null==e&&(e={});for(const s of this.callbacks)await s.onEpochEnd(t,e)}async onBatchBegin(t,e){null==e&&(e={});for(const s of this.callbacks)await s.onBatchBegin(t,e)}async onBatchEnd(t,e){null==e&&(e={});for(const s of this.callbacks)await s.onBatchEnd(t,e)}async onTrainBegin(t){null==t&&(t={});for(const e of this.callbacks)await e.onTrainBegin(t)}async onTrainEnd(t){null==t&&(t={});for(const e of this.callbacks)await e.onTrainEnd(t)}}class Ae extends Ne{constructor(){super()}async onEpochBegin(t){this.seen=0,this.totals={}}async onBatchEnd(t,s){null==s&&(s={});const i=null==s.size?0:s.size;this.seen+=i;for(const t in s){const n=s[t];if("number"==typeof 
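// `Ne`/`Ce` above define the Keras-style callback protocol (onTrainBegin,
// onEpochBegin/End, onBatchBegin/End, onTrainEnd), awaited in order by the
// CallbackList; `Ae` (declared here) averages per-batch logs into epoch logs.
// Subclass sketch, assuming the public `tf.Callback` alias for this base:
//
//   class LossLogger extends tf.Callback {
//     async onEpochEnd(epoch, logs) { console.log(epoch, logs.loss); }
//   }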
n)this.totals.hasOwnProperty(t)||(this.totals[t]=0),this.totals[t]=this.totals[t]+n*i;else{let s;t in this.totals?s=this.totals[t]:this.totals[t]=0;const r=e.tidy((()=>e.add(this.totals[t],e.mul(n,i))));this.totals[t]=r,null!=s&&s.dispose()}}}async onEpochEnd(t,s){if(null!=s)for(const t of this.params.metrics)null!=this.totals[t]&&("number"==typeof this.totals[t]?s[t]=this.totals[t]/this.seen:e.tidy((()=>{const i=e.mul(e.div(1,this.seen),this.totals[t]);s[t]=i,this.totals[t].dispose(),e.keep(s[t])})))}}class xe extends Ne{async onTrainBegin(t){this.epoch=[],this.history={}}async onEpochEnd(t,e){null==e&&(e={}),this.epoch.push(t);for(const t in e)null==this.history[t]&&(this.history[t]=[]),this.history[t].push(e[t])}async syncData(){const t=[],e=[],s=[];for(const i in this.history){const n=this.history[i];for(let r=0;r<n.length;++r)if("number"!=typeof n[r]){const a=n[r];t.push(a.data()),e.push(i),s.push(r)}}const i=await Promise.all(t);for(let t=0;t<i.length;++t){this.history[e[t]][s[t]].dispose(),this.history[e[t]][s[t]]=i[t][0]}}}class Ie extends Ne{constructor(t,s){if(super(),this.currentEpoch=0,this.nowFunc=t.nowFunc,this.nextFrameFunc=t.nextFrameFunc||e.nextFrame,this.yieldEvery=s||"auto","auto"===this.yieldEvery&&(this.yieldEvery=125),"never"===this.yieldEvery&&null!=t.onYield)throw new Error("yieldEvery is `never` but you provided an `onYield` callback. Either change `yieldEvery` or remove the callback");e.util.isNumber(this.yieldEvery)&&(this.maybeWait=function(t,s,i){let n,r=null!=i?i():e.util.now();return(...a)=>{const o=null!=i?i():e.util.now();return o-r<s||(r=o,n=t(...a)),n}}(this.maybeWait.bind(this),this.yieldEvery,this.nowFunc)),this.trainBegin=t.onTrainBegin,this.trainEnd=t.onTrainEnd,this.epochBegin=t.onEpochBegin,this.epochEnd=t.onEpochEnd,this.batchBegin=t.onBatchBegin,this.batchEnd=t.onBatchEnd,this.yield=t.onYield}async maybeWait(t,e,s){const i=[];null!=this.yield&&(await Se(s),i.push(this.yield(t,e,s))),i.push(this.nextFrameFunc()),await Promise.all(i)}async onEpochBegin(t,e){this.currentEpoch=t,null!=this.epochBegin&&(await Se(e),await this.epochBegin(t,e))}async onEpochEnd(t,e){const s=[];null!=this.epochEnd&&(await Se(e),s.push(this.epochEnd(t,e))),"epoch"===this.yieldEvery&&s.push(this.nextFrameFunc()),await Promise.all(s)}async onBatchBegin(t,e){null!=this.batchBegin&&(await Se(e),await this.batchBegin(t,e))}async onBatchEnd(t,s){const i=[];null!=this.batchEnd&&(await Se(s),i.push(this.batchEnd(t,s))),"batch"===this.yieldEvery?i.push(this.nextFrameFunc()):e.util.isNumber(this.yieldEvery)&&i.push(this.maybeWait(this.currentEpoch,t,s)),await Promise.all(i)}async onTrainBegin(t){null!=this.trainBegin&&(await Se(t),await this.trainBegin(t))}async onTrainEnd(t){null!=this.trainEnd&&(await Se(t),await this.trainEnd(t))}}function Le(t,e){if(null==t&&(t={}),t instanceof Ne)return[t];if(Array.isArray(t)&&t[0]instanceof Ne)return t;return g(t).map((t=>new Ie(t,e)))}class Te{constructor(){}static registerCallbackConstructor(t,s){e.util.assert(t>=0&&Number.isInteger(t),(()=>`Verbosity level is expected to be an integer >= 0, but got ${t}`)),Te.checkForDuplicate(s),null==Te.constructors[t]&&(Te.constructors[t]=[]),Te.constructors[t].push(s)}static checkForDuplicate(t){for(const e in Te.constructors){Te.constructors[+e].forEach((e=>{if(e===t)throw new a("Duplicate callback constructor.")}))}}static clear(){Te.constructors={}}static createCallbacks(t){const e=[];for(const s in Te.constructors){const i=+s;t>=i&&e.push(...Te.constructors[i])}return e.map((t=>new t))}}function 
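// `Ie` above wraps a plain object of handlers (onEpochEnd, onBatchEnd,
// onYield, ...) into a callback and throttles yielding to the UI via
// `yieldEvery` ('auto' resolves to every 125 ms). Sketch of the shorthand
// accepted by model.fit():
//
//   await model.fit(xs, ys, {
//     epochs: 5,
//     callbacks: {onEpochEnd: async (epoch, logs) => console.log(logs.loss)},
//   });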
Ee(t,e,s,i,n,r,a,o,l){const u=new xe,h=[new Ae,...Te.createCallbacks(e)];null!=t&&h.push(...t),h.push(u);const c=new Ce(h);return c.setParams({epochs:s,initialEpoch:i,samples:n,steps:r,batchSize:a,verbose:e,doValidation:o,metrics:l}),{callbackList:c,history:u}}function De(t,s={},i=!1){return z(t,e.serialization.SerializationMap.getMap().classNameMap,s,"layer",i)}function $e(t,s){return e.tidy((()=>{"float32"!==t.dtype&&(t=i.cast(t,"float32"));const e=i.sum(ct(t),s,!0),n=i.fill(e.shape,Q()),r=i.sqrt(i.maximum(e,n));return i.div(t,r)}))}function Fe(t,s){return e.tidy((()=>i.mean(ct(i.sub(s,t)),-1)))}function Oe(t,s){return e.tidy((()=>i.mean(i.abs(i.sub(s,t)),-1)))}function Re(t,s){return e.tidy((()=>{const e=i.sub(t,s),n=i.clipByValue(i.abs(t),Q(),Number.MAX_VALUE),r=i.abs(i.div(e,n));return i.mul(100,i.mean(r,-1))}))}function _e(t,s,n=!1){return e.tidy((()=>{if(n)s=i.softmax(s);else{const t=i.sum(s,s.shape.length-1,!0);s=i.div(s,t)}return s=i.clipByValue(s,Q(),1-Q()),i.neg(i.sum(i.mul(i.cast(t,"float32"),i.log(s)),s.shape.length-1))}))}function Me(t,s,n=!1){return e.tidy((()=>{const e=i.cast(i.floor(function(t){const e=[Z(t.shape)];return i.reshape(t,e)}(t)),"int32"),r=(s=i.clipByValue(s,Q(),1-Q())).shape;return _e(i.reshape(i.oneHot(e,r[r.length-1]),r),s,n)}))}function Be(t,s){return e.tidy((()=>{let n;return n=i.clipByValue(s,Q(),1-Q()),n=i.log(i.div(n,i.sub(1,n))),i.mean(function(t,s){if(!e.util.arraysEqual(t.shape,s.shape))throw new a(`logits and labels must have the same shape, but got shapes ${JSON.stringify(t.shape)} and ${JSON.stringify(s.shape)}`);return e.tidy((()=>{const e=i.relu(s),n=i.neg(i.abs(s));return i.add(i.sub(e,i.mul(s,t)),i.log1p(i.exp(n)))}))}(t,n),-1)}))}function We(t,s){return e.tidy((()=>{const e=$e(t,-1),n=$e(s,-1),r=i.mul(e,n);return i.neg(i.sum(r,-1))}))}Te.constructors={};const Ue={meanSquaredError:Fe,meanAbsoluteError:Oe,meanAbsolutePercentageError:Re,meanSquaredLogarithmicError:function(t,s){return e.tidy((()=>{const e=i.clipByValue(s,Q(),Number.MAX_VALUE),n=i.log(i.add(1,e)),r=i.clipByValue(t,Q(),Number.MAX_VALUE),a=i.log(i.add(1,r));return i.mean(ct(i.sub(n,a)),-1)}))},squaredHinge:function(t,s){return e.tidy((()=>{const e=i.maximum(0,i.sub(1,i.mul(t,s)));return i.mean(ct(e),-1)}))},hinge:function(t,s){return e.tidy((()=>{const e=i.maximum(0,i.sub(1,i.mul(t,s)));return i.mean(e,-1)}))},categoricalHinge:function(t,s){return e.tidy((()=>{const e=i.sum(i.mul(t,s),-1),n=i.max(i.mul(i.sub(1,t),s),-1);return i.maximum(0,i.add(1,i.sub(n,e)))}))},logcosh:function(t,s){return e.tidy((()=>{const e=Math.log(2),n=i.sub(s,t),r=i.sub(i.add(n,i.softplus(i.mul(-2,n))),e);return i.mean(r,-1)}))},categoricalCrossentropy:_e,sparseCategoricalCrossentropy:Me,binaryCrossentropy:Be,kullbackLeiblerDivergence:function(t,s){return e.tidy((()=>{const e=i.clipByValue(t,Q(),1),n=i.clipByValue(s,Q(),1);return i.sum(i.mul(t,i.log(i.div(e,n))),-1)}))},poisson:function(t,s){return e.tidy((()=>{const e=i.log(i.add(Q(),s));return i.mean(i.sub(s,i.mul(t,e)),-1)}))},cosineProximity:We};function Pe(t){if("string"==typeof t){if(t in Ue)return Ue[t];let e=`Unknown loss ${t}`;throw t.toLowerCase().includes("softmaxcrossentropy")&&(e=`Unknown loss ${t}. 
Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new a(e)}return t}function je(t,s){return e.tidy((()=>{const e=i.mul(.5,i.onesLike(s)),n=tt(i.greater(s,e),t.dtype);return i.mean(i.equal(t,n),-1)}))}function Ve(t,s){return e.tidy((()=>tt(i.equal(i.argMax(t,-1),i.argMax(s,-1)),"float32")))}function qe(t,s){return e.tidy((()=>i.cast(i.sum(i.logicalAnd(i.equal(t,1),i.equal(s,1))),"float32")))}function Je(t,s){return e.tidy((()=>{const n=qe(t,s),r=function(t,s){return e.tidy((()=>i.cast(i.sum(i.logicalAnd(i.equal(t,0),i.equal(s,1))),"float32")))}(t,s),a=i.add(n,r);return i.cast(i.where(i.greater(a,0),i.div(n,a),0),"float32")}))}function He(t,s){return e.tidy((()=>{const n=qe(t,s),r=function(t,s){return e.tidy((()=>i.cast(i.sum(i.logicalAnd(i.equal(t,1),i.equal(s,0))),"float32")))}(t,s),a=i.add(n,r);return i.cast(i.where(i.greater(a,0),i.div(n,a),0),"float32")}))}function Ze(t,e){return Be(t,e)}function Ke(t,e){return t.rank===e.rank&&(t=i.squeeze(t,[t.rank-1])),(e=i.argMax(e,-1)).dtype!==t.dtype&&(e=i.cast(e,t.dtype)),i.cast(i.equal(t,e),"float32")}const Ge=_e,Ye=Me,Xe={binaryAccuracy:je,categoricalAccuracy:Ve,precision:Je,categoricalCrossentropy:Ge,sparseCategoricalCrossentropy:Ye,mse:Fe,MSE:Fe,mae:Oe,MAE:Oe,mape:Re,MAPE:Re,cosine:We};function Qe(t){if("string"==typeof t&&t in Xe)return Xe[t];if("string"!=typeof t&&null!=t)return t;throw new a(`Unknown metric ${t}`)}function ts(t){if(c(null!==t,`Unknown LossOrMetricFn ${t}`),"string"==typeof t)return t;{let e;for(const s of Object.keys(Ue))if(Ue[s]===t){e=s;break}if(void 0!==e)return e;for(const s of Object.keys(Xe))if(Xe[s]===t){e=s;break}return void 0!==e?e:t.name}}const es=1048576;function ss(t,e,s=!1){if(null==t||"object"!=typeof t||Object.getPrototypeOf(t)!==Object.prototype||!is(t))throw new Error("User-defined metadata is expected to be a JSON object, but is not.");if(s){const s=JSON.stringify(t);s.length>es&&console.warn(`User-defined metadata of model "${e}" is too large in size (length=${s.length} when serialized). It is not recommended to store such large objects in user-defined metadata. 
Please make sure its serialized length is <= 1048576.`)}}function is(t){if(null===t)return!0;if("object"==typeof t){if(Object.getPrototypeOf(t)===Object.prototype){const e=Object.keys(t);for(const s of e){if("string"!=typeof s)return!1;if(!is(t[s]))return!1}return!0}if(Array.isArray(t)){for(const e of t)if(!is(e))return!1;return!0}return!1}{const e=typeof t;return"string"===e||"number"===e||"boolean"===e}}function ns(t,e,s,i=console.log){const n=function(t){let e=!0;const s=[],i=[];for(const e in t.nodesByDepth)s.push(t.nodesByDepth[e]);for(const t of s){if(t.length>1||1===t.length&&t[0].inboundLayers.length>1){e=!1;break}i.push(...t)}if(e)for(const s of t.layers){let t=!1;for(const n of s.inboundNodes)if(-1!==i.indexOf(n)){if(t){e=!1;break}t=!0}if(!e)break}return e}(t),r=["Layer (type)","Input Shape","Output shape","Param #"];let a;if(n?(e=e||90,s=s||[.32,.61,.89,1]):(e=e||115,s=s||[.24,.48,.7,.8,1]),s[s.length-1]<=1&&(s=s.map((t=>Math.floor(e*t)))),!n){r.push("Receives inputs"),a=[];for(const e in t.nodesByDepth)a.push(...t.nodesByDepth[e])}i("_".repeat(e)),rs(r,s,i),i("=".repeat(e));const o=t.layers;for(let t=0;t<o.length;++t)n?as(o[t],s,i):os(o[t],s,a,i),i((t===o.length-1?"=":"_").repeat(e));t.checkTrainableWeightsConsistency();const l=function(t){let e;e=null!=t.collectedTrainableWeights?Pt(t.collectedTrainableWeights):Pt(t.trainableWeights);return e}(t),u=Pt(t.nonTrainableWeights);i(`Total params: ${l+u}`),i(`Trainable params: ${l}`),i(`Non-trainable params: ${u}`),i("_".repeat(e))}function rs(t,e,s=console.log){let i="";for(let s=0;s<t.length;++s)s>0&&(i=i.slice(0,i.length-1)+" "),i+=t[s],i=i.slice(0,e[s]),i+=" ".repeat(e[s]-i.length);s(i)}function as(t,e,s){let i,n;try{n=t.inboundNodes.map((t=>JSON.stringify(t.inputShapes))).join(",")}catch(t){n="multiple"}try{i=JSON.stringify(t.outputShape)}catch(t){i="multiple"}rs([`${t.name} (${t.getClassName()})`,n,i,t.countParams().toString()],e,s)}function os(t,e,s,i){let n,r;try{r=t.inboundNodes.map((t=>JSON.stringify(t.inputShapes))).join(",")}catch(t){r="multiple"}try{n=JSON.stringify(t.outputShape)}catch(t){n="multiple"}const a=[];for(const e of t.inboundNodes)if(!(null!=s&&s.length>0&&-1===s.indexOf(e)))for(let t=0;t<e.inboundLayers.length;++t){const s=e.inboundLayers[t].name,i=e.nodeIndices[t],n=e.tensorIndices[t];a.push(`${s}[${i}][${n}]`)}const o=t.name,l=t.getClassName(),u=0===a.length?"":a[0];rs([`${o} (${l})`,r,n,t.countParams().toString(),u],e,i);for(let t=1;t<a.length;++t)rs(["","","","",a[t]],e,i)}function ls(t,e,s){return("inboundNodes"===t||"outputLayers"===t||"inputLayers"===t)&&0===e&&"string"==typeof s}function us(t,e){if(null===t)return null;if("string"==typeof t)return m(t);if("number"==typeof t||"boolean"==typeof t)return t;if(t instanceof Array){const s=[],i=t.length;for(let n=0;n<i;++n){const i=t[n];ls(e,n,i)?s.push(i):s.push(us(i,e))}return s}{const e={};for(const s of Object.keys(t)){const i=t[s];if("name"===s&&"string"==typeof i)e[s]=i;else{const t=m(s);e[t]=us(i,t)}}return e}}function hs(t,e){if(null==t)return null;if("string"==typeof t)return f(t);if("number"==typeof t||"boolean"==typeof t)return t;if(t instanceof Array){const s=[],i=t.length;for(let n=0;n<i;++n){const i=t[n];ls(e,n,i)?s.push(i):s.push(hs(i,e))}return s}{const e={};for(const s of Object.keys(t)){const i=t[s],n=f(s);e[n]="name"!==s&&"className"!==s||"string"!=typeof i?hs(i,s):i}return e}}const cs="3.20.0";class ps extends Xt{constructor(t){if(super({}),this.containerNodes=new Set,this.name=t.name,null==this.name){const 
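// `Ue` (losses) and `Xe` (metrics) above map string names such as 'mse',
// 'mae' and 'categoricalCrossentropy' to functions of (yTrue, yPred), and
// Pe()/Qe() resolve either a string or a custom function. Compile sketch:
//
//   model.compile({optimizer: 'adam', loss: 'meanSquaredError', metrics: ['mse', 'mae']});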
t=this.getClassName().toLowerCase();this.name=D(t)}if(this.supportsMasking=!1,this.trainable_=!0,Array.isArray(t.inputs)?this.inputs=t.inputs.slice():this.inputs=[t.inputs],Array.isArray(t.outputs)?this.outputs=t.outputs.slice():this.outputs=[t.outputs],S(this.inputs).length!==this.inputs.length)throw new a(`The list of inputs passed to the model is redundant. All inputs should only appear once. Found: ${this.inputs.map((t=>t.name))}`);S(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. Found: ${this.outputs.map((t=>t.name))}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[];for(const t of this.outputs){const e=t.sourceLayer,s=t.nodeIndex,i=t.tensorIndex;this.outputLayers.push(e),this.outputLayersNodeIndices.push(s),this.outputLayersTensorIndices.push(i)}for(const t of this.inputs){const e=t.sourceLayer,s=t.nodeIndex,i=t.tensorIndex;c(0===s,"input layer has >1 nodes"),c(0===i,"input layer has >1 tensors"),this.inputLayers.push(e),this.inputLayersNodeIndices.push(s),this.inputLayersTensorIndices.push(i)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let e=0;e<this.inputLayers.length;e++){const s=this.inputLayers[e];if(!(s instanceof te))throw new TypeError(`Input layers to a LayersModel must be InputLayer objects. Received inputs: ${t.inputs}. Input ${e} (0-based) originates from layer type ${s.getClassName()}.`);this.inputNames.push(s.name),this.feedInputShapes.push(s.batchInputShape),this.feedInputNames.push(s.name)}for(const t of this.outputLayers)this.outputNames.push(t.name);this.internalInputShapes=this.inputs.map((t=>t.shape)),this.internalOutputShapes=this.outputs.map((t=>t.shape));const e={},s={},i={},n={},o={},l=[],u=(t,e,s,i,n,a)=>{null!=i&&null!=n&&null!=a||(i=t.sourceLayer,n=t.nodeIndex,a=t.tensorIndex);const h=i.inboundNodes[n];if(-1!==s.indexOf(h))throw new r(`The tensor ${t.name} at layer "${i.name}" is part of a cycle.`);if(-1!==e.indexOf(h))return;this.containerNodes.add(ps.nodeKey(i,n)),i.id in o||(o[i.id]=Object.keys(o).length),-1===s.indexOf(h)&&s.push(h);const c=h.inboundLayers.length;for(let t=0;t<c;t++){const i=h.inputTensors[t],n=h.inboundLayers[t],r=h.nodeIndices[t],a=h.tensorIndices[t];u(i,e,s,n,r,a)}for(e.push(h);s.indexOf(h)>=0;)s.splice(s.indexOf(h),1);l.push(h)},h=[],p=[];for(const t of this.outputs)u(t,h,p);const d=l.slice().reverse();for(const t of d){s[t.id]=t,t.id in e||(e[t.id]=0);let r=e[t.id];const a=null==i[t.outboundLayer.id]?0:i[t.outboundLayer.id];r=Math.max(r,a),i[t.outboundLayer.id]=r,n[t.outboundLayer.id]=t.outboundLayer,e[t.id]=r;for(let i=0;i<t.inboundLayers.length;i++){const n=t.inboundLayers[i],a=t.nodeIndices[i],o=n.inboundNodes[a],l=null==e[o.id]?0:e[o.id];e[o.id]=Math.max(r+1,l),s[o.id]=o}}const g={};for(const t in e){const i=e[t];i in g||(g[i]=[]),g[i].push(s[t])}const f={};for(const t in i){const e=i[t];e in f||(f[e]=[]),f[e].push(n[t])}let m=Object.keys(f).map((t=>parseInt(t,10))).sort(k);this.layers=[];for(const t of m){const e=f[t];e.sort(((t,e)=>{const s=o[t.id],i=o[e.id];return s<i?-1:s>i?1:0}));for(const t of e)t instanceof ps&&this.internalContainerRefs.push(t),this.layers.push(t)}this.layersByDepth=f,m=Object.keys(g).map((t=>parseInt(t,10))).sort(k);const y=this.inputs.slice(),b=[];for(const t of 
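// `ps` is the Container behind the functional API: its constructor (above)
// walks the graph from outputs back to inputs, rejects cycles, redundant
// inputs and duplicate layer names, and orders layers/nodes by depth.
// Functional-API sketch:
//
//   const input = tf.input({shape: [4]});
//   const output = tf.layers.dense({units: 2}).apply(input);
//   const model = tf.model({inputs: input, outputs: output});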
m)for(const e of g[t]){const t=e.outboundLayer;if(null!=t){for(const s of e.inputTensors)if(-1===y.indexOf(s))throw new r(`Graph disconnected: cannot obtain value for tensor ${s} at layer "${t.name}". The following previous layers were accessed without issue: ${b}`);for(const t of e.outputTensors)y.push(t);b.push(t.name)}}this.nodesByDepth=g;const w=this.layers.map((t=>t.name));for(const t of w){const e=w.filter((e=>e===t)).length;if(1!==e)throw new r(`The name "${t}" is used ${e} times in the model. All layer names should be unique. Layer names: `+JSON.stringify(w))}this.outboundNodes=[],this.inboundNodes=[],new Gt({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map((t=>null)),outputMasks:this.outputs.map((t=>null)),inputShapes:this.inputs.map((t=>t.shape)),outputShapes:this.outputs.map((t=>t.shape))}),this.built=!0,this._refCount=1}assertNotDisposed(){if(0===this._refCount)throw new Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();const t={refCountAfterDispose:null,numDisposedVariables:0};if(0==--this._refCount){for(const e of this.layers)t.numDisposedVariables+=e.dispose().numDisposedVariables;for(const e of this.internalContainerRefs)t.numDisposedVariables+=e.dispose().numDisposedVariables}return t.refCountAfterDispose=this._refCount,t}get trainable(){return this.trainable_}set trainable(t){this.layers.forEach((e=>{e._trainableWeights.forEach((e=>e.trainable=t))})),this.trainable_=t}get trainableWeights(){if(this._trainableWeights.length>0)throw new a("Container instance unexpectedly contains _trainableWeights. The trainable weights of a Container are a union of the trainable weights of its constituent Layers. Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let t=[];for(const e of this.layers)t=t.concat(e.trainableWeights);return t}get nonTrainableWeights(){const t=[];for(const e of this.layers)t.push(...e.nonTrainableWeights);if(!this.trainable){const e=[];for(const t of this.layers)e.push(...t.trainableWeights);return e.concat(t)}return t}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(t,e=!0){const s={};let i=0;for(const t of this.layers)for(const e of t.weights){if(null!=s[e.originalName])throw new a(`Duplicate weight name: ${e.originalName}`);s[e.originalName]=e,i++}const n=[];for(const i in t){let r=i;if(null==s[i]){const t=i.split("/");r=t.slice(0,-2).concat([t[t.length-1]]).join("/")}if(null!=s[r])n.push([s[r],t[i]]);else if(e)throw new a(`Provided weight data has no target variable: ${i}`);delete s[r]}if(e){const t=[];for(const e in s)t.push(e);if(t.length>0)throw new a(`${t.length} of ${i} weights are not set: ${t}`)}Jt(n)}updatedConfig(){const t=this.getConfig(),e={};return e.className=this.getClassName(),e.config=t,e.kerasVersion="tfjs-layers 3.20.0",e.backend="TensorFlow.js",e}toJSON(t,e=!0){const s=hs(this.updatedConfig());return e?JSON.stringify(s):s}call(t,s){return e.tidy((()=>{t=g(t);const e=new se;for(let s=0;s<this.inputs.length;++s)e.add(this.inputs[s],t[s]);return re(this.outputs,e,s)}))}computeMask(t,s){return e.tidy((()=>{let e;return t=g(t),e=null==s?h(null,t.length):g(s),this.runInternalGraph(t,e)[1]}))}computeOutputShape(t){const e=Bt(t);if(e.length!==this.inputLayers.length)throw new a(`Invalid inputShape argument ${t}: model has ${this.inputLayers.length} tensor inputs.`);const s={};for(let t=0;t<e.length;t++){const 
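/* Editor's note (illustrative sketch, not part of the bundle): the `trainable` setter
   defined above propagates the flag to every constituent layer's weights, which is the
   usual way to freeze a model for transfer learning (identifiers hypothetical):

     baseModel.trainable = false;                                      // freeze everything
     baseModel.layers[baseModel.layers.length - 1].trainable = true;   // unfreeze the head
     // Re-compile afterwards so collectedTrainableWeights is rebuilt; otherwise
     // checkTrainableWeightsConsistency() warns about a discrepancy (see below).
*/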
i=this.inputLayers[t],n=e[t];s[i.name+"_0_0"]=n}const i=Object.keys(this.nodesByDepth).map((t=>parseInt(t,10))).sort(k);if(i.length>1)for(const t of i){const e=this.nodesByDepth[t];for(const t of e){const e=t.outboundLayer;if(-1!==this.inputLayers.map((t=>t.id)).indexOf(e.id))continue;const i=[];for(let e=0;e<t.inboundLayers.length;e++){const n=t.inboundLayers[e],r=t.nodeIndices[e],a=t.tensorIndices[e],o=s[`${n.name}_${r}_${a}`];i.push(o)}const n=Bt(e.computeOutputShape(d(i))),r=e.inboundNodes.indexOf(t);for(let t=0;t<n.length;t++){s[`${e.name}_${r}_${t}`]=n[t]}}}const n=[],r=[];for(let t=0;t<this.outputLayers.length;t++){const e=this.outputLayers[t],s=this.outputLayersNodeIndices[t],i=this.outputLayersTensorIndices[t],n=`${e.name}_${s}_${i}`;r.push(n)}for(let t=0;t<r.length;t++){const e=r[t];c(e in s),n.push(s[e])}return d(n)}runInternalGraph(t,e){null==e&&(e=h(null,t.length));const s={};for(let i=0;i<this.inputs.length;++i){const n=this.inputs[i],r=t[i],a=e[i];s[n.id]=[r,a]}const i=Object.keys(this.nodesByDepth).map((t=>parseInt(t,10))).sort(k);for(const t of i){const e=this.nodesByDepth[t];for(const t of e){const e=t.outboundLayer,i=t.inputTensors,n=t.outputTensors,r=new Array;for(const t of i)t.id in s&&r.push(s[t.id]);if(r.length===i.length){let i,a,l,u,h={};if(null!=t.callArgs&&(h=t.callArgs),1===r.length){const[t,s]=r[0];null==h.mask&&(h.mask=s),l=g(e.call(t,h)),u=g(e.computeMask(t,s)),i=[t],a=[s]}else i=r.map((t=>t[0])),a=r.map((t=>t[1])),null==h.mask&&(h.mask=a),l=g(e.call(i,h)),u=g(e.computeMask(i,a));if(e.activityRegularizer)throw new o("LayersModel invocation with concrete Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let t=0;t<n.length;++t){const e=n[t],i=l[t],r=u[t];s[e.id]=[i,r]}}}}const n=[],r=[],a=[];for(const t of this.outputs){c(t.id in s,`Could not compute output ${t.name} : ${t.id}`);const[e,i]=s[t.id];a.push(e.shape),n.push(e),r.push(i)}return[n,r,a]}buildNodeConversionMap(t){const e={};let s;for(const t of this.layers){s=t instanceof ps?1:0;for(let i=0;i<t.inboundNodes.length;i++){const n=ps.nodeKey(t,i);this.containerNodes.has(n)&&(e[n]=s,s+=1)}}return e}getLayer(t,e){if(null!=e){if(this.layers.length<=e)throw new a(`Was asked to retrieve layer at index ${e}, but model only has ${this.layers.length} layer(s).`);return this.layers[e]}if(null==t)throw new a("Provide either a layer name or layer index");for(const e of this.layers)if(e.name===t)return e;throw new a(`No such layer: ${t}`)}calculateLosses(){return e.tidy((()=>{const t=[];for(const e of this.layers)for(let s=0;s<e.inboundNodes.length;++s){const i=ps.nodeKey(e,s);this.containerNodes.has(i)&&t.push(...e.calculateLosses())}return t}))}getConfig(){const t={name:this.name},e=this.buildNodeConversionMap(this.layers),s=[];for(const t of this.layers){const i=t.getClassName(),n=t.getConfig(),r=[];for(let s=0;s<t.inboundNodes.length;s++){const i=t.inboundNodes[s],n=ps.nodeKey(t,s);let a={};if(this.containerNodes.has(n)){if(i.callArgs)try{JSON.stringify(i.callArgs),a=i.callArgs}catch(e){console.warn(`Layer ${t.name} was passed non-serializable keyword arguments: ${i.callArgs}. 
They will not be included in the serialized model (and thus will be missing at deserialization time).`),a={}}if(i.inboundLayers.length>0){const t=[];for(let s=0;s<i.inboundLayers.length;s++){const n=i.inboundLayers[s],r=i.nodeIndices[s],o=i.tensorIndices[s];let l=e[ps.nodeKey(n,r)];null==l&&(l=0),t.push([n.name,l,o,a])}r.push(t)}}}const a={};a.name=t.name,a.className=i,a.config=n,a.inboundNodes=r,s.push(a)}t.layers=s;const i=[];for(let t=0;t<this.inputLayers.length;t++){const s=this.inputLayers[t],n=this.inputLayersNodeIndices[t],r=ps.nodeKey(s,n);if(!this.containerNodes.has(r))continue;let a=e[r];null==a&&(a=0);const o=this.inputLayersTensorIndices[t];i.push([s.name,a,o])}t.inputLayers=i;const n=[];for(let t=0;t<this.outputLayers.length;t++){const s=this.outputLayers[t],i=this.outputLayersNodeIndices[t],r=ps.nodeKey(s,i);if(!this.containerNodes.has(r))continue;let a=e[r];null==a&&(a=0);const o=this.outputLayersTensorIndices[t];n.push([s.name,a,o])}return t.outputLayers=n,t}static fromConfig(t,e,s={},i=!1){const n={},r={};function o(t,e){t.name in r?r[t.name].push(e):r[t.name]=[e]}function l(t,e){const s=[];let i;for(const r of e){const a=r[0],l=r[1],u=r[2];if(i=null==r[3]?{}:r[3],!(a in n))return void o(t,e);const h=n[a];if(h.inboundNodes.length<=l)return void o(t,e);const c=h.inboundNodes[l];s.push(c.outputTensors[u])}s.length>0&&t.apply(d(s),i)}function u(t){const s=t.name,r=De(t,null!=e.customObjects?e.customObjects:{});r.setFastWeightInitDuringBuild(i),n[s]=r;t.inboundNodes.forEach((t=>{if(!(t instanceof Array))throw new a(`Corrupted configuration, expected array for nodeData: ${t}`);o(r,t)}))}const h=e.name,p=e.layers;for(const t of p)u(t);for(;!v(r);)for(const t of p){const e=n[t.name];if(e.name in r){const t=r[e.name];delete r[e.name];for(const s of t)l(e,s)}}const g=[],f=[],m=e.inputLayers;for(const t of m){const e=t[0],s=t[1],i=t[2];c(e in n);const r=n[e].inboundNodes[s].outputTensors;g.push(r[i])}const y=e.outputLayers;for(const t of y){const e=t[0],s=t[1],i=t[2];c(e in n);const r=n[e].inboundNodes[s].outputTensors;f.push(r[i])}return new t({inputs:g,outputs:f,name:h})}get stateful(){if(this._stateful)throw new a("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(const t of this.layers)if(t.stateful)return!0;return!1}resetStates(){e.tidy((()=>{this.layers.forEach((t=>{t.stateful&&t.resetStates()}))}))}}function ds(t,e){return function(t,e,s){const i=e.length;if(null==t||Array.isArray(t)&&0===t.length)return e.map((t=>null));if(1===i)return Array.isArray(t)&&1===t.length?t:"object"==typeof t&&e[0]in t?[t[e[0]]]:[t];if(Array.isArray(t)){if(t.length!==i)throw new Error(`Provided ${s} is an array of ${t.length} element(s), but the model has ${i} outputs. Make sure a set of weights is provided for each model output.`);return t}if("object"==typeof t&&Object.keys(t).length>0&&"object"==typeof t[Object.keys(t)[0]]){const s=[];return e.forEach((e=>{e in t?s.push(t[e]):s.push(null)})),s}throw new Error(`The model has multiple (${i}) outputs, so ${s} must be either an array with ${i} elements or an object with ${e} keys. 
Provided ${s} not understood: ${JSON.stringify(t)}`)}(t,e,"classWeight")}async function gs(t,s,i,n){if(null!=s||null!=n)throw new Error("Support for sampleWeight is not implemented yet");if(null!=i){const s=e.tidy((()=>{if(1===t.shape.length)return e.clone(t);if(2===t.shape.length){if(t.shape[1]>1){const s=1;return e.argMax(t,s)}if(1===t.shape[1])return e.reshape(t,[t.shape[0]]);throw new Error(`Encountered unexpected last-dimension size (${t.shape[1]}) during handling of class weights. The size is expected to be >= 1.`)}throw new Error(`Unexpected rank of target (y) tensor (${t.rank}) during handling of class weights. The rank is expected to be 1 or 2.`)})),n=Array.from(await s.data());e.dispose(s);const r=[];return n.forEach((t=>{if(null==i[t])throw new Error(`classWeight must contain all classes in the training data. The class ${t} exists in the data but not in classWeight.`);r.push(i[t])})),e.tensor1d(r,"float32")}return null}function fs(t,s){return e.mul(t,s)}function ms(t,e){let s,n;const r=e;s=r.xs,n=r.ys,i.util.assert(null!=s&&null!=n,(()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. The provided Dataset instead generates ${e}`));const a=ys("input",t.inputNames,s),o=ys("output",t.outputNames,n),l=a[0].shape[0];i.util.assert(a.length===t.inputs.length,(()=>`LayersModel has ${t.inputs.length} inputs, but the dataset provides ${a.length} inputs. (Expected input keys: ${JSON.stringify(t.inputNames)})`)),i.util.assert(o.length===t.outputs.length,(()=>`LayersModel has ${t.outputs.length} outputs, but the dataset provides ${o.length} outputs. (Expected output keys: ${JSON.stringify(t.outputNames)})`));for(let e=0;e<a.length;e++)i.util.assert(a[e].shape[0]===l,(()=>`Batch size mismatch: input ${t.inputNames[e]} has ${a[e].shape[0]}; expected ${l} based on input ${t.inputNames[0]}.`));for(let e=0;e<o.length;e++)i.util.assert(o[e].shape[0]===l,(()=>`Batch size mismatch: output ${t.outputNames[e]} has ${o[e].shape[0]}; expected ${l} based on input ${t.inputNames[0]}.`));return{xs:a,ys:o}}function ys(t,e,s){if(s instanceof i.Tensor)return[s];if(Array.isArray(s))return i.util.assert(s.length===e.length,(()=>`Received an array of ${s.length} Tensors, but expected ${e.length} to match the ${t} keys ${e}.`)),s;{const i=[];for(const n of e){if(null==s[n])throw new a(`The feature data generated by the dataset lacks the required ${t} key '${n}'.`);i.push(s[n])}return i}}async function bs(t,e,s){const n=null!=s.batchesPerEpoch;if(i.util.assert(null!=t.optimizer,(()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig).")),i.util.assert(null!=s,(()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call.")),i.util.assert(null!=s.epochs&&s.epochs>0&&Number.isInteger(s.epochs),(()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${s.epochs}`)),i.util.assert(!n||s.batchesPerEpoch>0&&Number.isInteger(s.batchesPerEpoch),(()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${s.batchesPerEpoch}`)),i.util.assert(null==s.validationSplit,(()=>"`validationSplit` is not supported by `fitDataset()`. 
Use validationData instead.")),t.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");t.isTraining=!0;try{const r=null!=s.validationData;let a,l;if(r)if(ws(s.validationData))i.util.assert(null==s.validationBatches||s.validationBatches>0&&Number.isInteger(s.validationBatches),(()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${s.validationBatches}`));else{const t=function(t){if(3===t.length)throw new o("Validation with sample weights is not implemented yet.");return{xs:t[0],ys:t[1]}}(s.validationData);a=t.xs,l=t.ys}const u=t.makeTrainFunction(),h=t.getDedupedMetricsNames();let c;c=r?h.slice().concat(h.map((t=>"val_"+t))):h.slice();const p=Le(s.callbacks,s.yieldEvery),d=null==s.verbose?1:s.verbose,{callbackList:f,history:m}=Ee(p,d,s.epochs,null,null,function(t,e){let s=null;null!=e.batchesPerEpoch?s=e.batchesPerEpoch:Number.isFinite(t.size)&&(s=t.size);return s}(e,s),null,r,c);f.setModel(t),t.history=m,await f.onTrainBegin(),t.stopTraining_=!1;let y=null==s.initialEpoch?0:s.initialEpoch,b=await e.iterator();for(;y<s.epochs;){const o={};await f.onEpochBegin(y);let c=0,p=0;for(n||(b=await e.iterator());!n||c<s.batchesPerEpoch;){const e=await b.next();if(n&&e.done){console.warn(`You provided \`batchesPerEpoch\` as ${s.batchesPerEpoch}, but your dataset iterator ran out of data after ${c} batches; interrupting training. Make sure that your dataset can generate at least \`batchesPerEpoch * epochs\` batches (in this case, `+s.batchesPerEpoch*s.epochs+" batches). You may need to use the repeat() function when building your dataset.");break}if(null!=e.value){const{xs:n,ys:r}=ms(t,e.value),a={};a.batch=p,a.size=n[0].shape[0],await f.onBatchBegin(p,a);const o=[];if(null!=s.classWeight){const e=ds(s.classWeight,t.outputNames);for(let t=0;t<e.length;++t)o.push(await gs(r[t],null,e[t]))}const l=n.concat(r).concat(o),d=u(l);i.dispose(l);for(let t=0;t<h.length;++t){const e=h[t],s=d[t];a[e]=s,i.keep(s)}await f.onBatchEnd(p,a),ve(a),p++,c++}if(n?c>=s.batchesPerEpoch:e.done){if(r){let e;e=ws(s.validationData)?g(await t.evaluateDataset(s.validationData,{batches:s.validationBatches})):g(t.evaluate(a,l,{batchSize:null==s.validationBatchSize?32:s.validationBatchSize,verbose:0}));for(let s=0;s<t.metricsNames.length;++s)o[`val_${t.metricsNames[s]}`]=e[s]}break}if(t.stopTraining_)break}if(await f.onEpochEnd(y,o),y++,t.stopTraining_)break}return await f.onTrainEnd(),await t.history.syncData(),t.history}finally{t.isTraining=!1}}function ws(t){return"function"==typeof t.iterator}function zs(t){i.util.assert(t>0&&Number.isInteger(t),(()=>`batchSize is required to be a positive integer, but got ${t}`))}function ks(t,e,s){return null==t?[null]:Array.isArray(t)?t.map((t=>st(t,e,s-e))):st(t,e,s-e)}function Ss(t,e){return i.tidy((()=>null==t?null:Array.isArray(t)?t.map((t=>Ss(t,e))):ht(t,"int32"===e.dtype?e:i.cast(e,"int32"))))}function vs(t,e){const s=[];let i=0,n=null;for(;i<t;)n=i+e,n>=t&&(n=t),s.push([i,n]),i=n;return s}async function Ns(t,s,n,r={}){if(t.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");let l,u,h,c,p,d,g,f,m;t.isTraining=!0;try{const y=null==r.batchSize?32:r.batchSize;zs(y);const b=!1,w=await t.standardizeUserData(s,n,r.sampleWeight,r.classWeight,b,y);l=w[0],u=w[1],m=w[2];let z,k=!1;if(null!=r.validationData&&r.validationData.length>0){if(k=!0,2!==r.validationData.length)throw 3===r.validationData.length?new o("validationData 
including sample weights is not supported yet."):new a(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${r.validationData} is invalid.`);p=r.validationData[0],d=r.validationData[1];const e=!0,s=await t.standardizeUserData(p,d,null,null,e,y);g=s[0],f=s[1],z=g.concat(f)}else if(null!=r.validationSplit&&r.validationSplit>0&&r.validationSplit<1){k=!0;const t=Math.floor(l[0].shape[0]*(1-r.validationSplit)),e=l[0].shape[0];g=ks(l,t,e),h=l,l=ks(l,0,t),f=ks(u,t,e),c=u,u=ks(u,0,t),z=g.concat(f)}else null!=r.validationSteps&&(k=!0);const S=l.concat(u).concat(m);t.checkTrainableWeightsConsistency();const v=t.makeTrainFunction(),N=t.getDedupedMetricsNames();let C,A;k?(t.makeTestFunction(),C=t.testFunction,A=N.slice().concat(N.map((t=>"val_"+t)))):(C=null,z=[],A=N.slice());const x=Le(r.callbacks,r.yieldEvery),I=await async function(t,s,n,r,l,u,h,c,p,d,g,f,m,y,b){null==l&&(l=32),null==u&&(u=1),null==g&&(g=!0),null==m&&(m=0);let w=!1;if(null!=p&&null!=d&&(w=!0),null!=b&&(w=!0,null==y))throw new a("Can only use `validationSteps` when doing step-wise training, i.e., `stepsPerEpoch` must be set.");const z=t.checkNumSamples(n,l,y,"steps_per_epoch");let k;null!=z&&(k=Y(0,z)),null==h&&(h=1);const{callbackList:S,history:v}=Ee(c,h,u,m,z,y,l,w,f);S.setModel(t),t.history=v,await S.onTrainBegin(),t.stopTraining_=!1;for(let a=m;a<u;++a){await S.onEpochBegin(a);const u={};if(null!=y)throw new o("stepsPerEpoch mode is not implemented yet.");{if("batch"===g)throw new o("batch shuffling is not implemented yet.");g&&e.util.shuffle(k);const a=e.tensor1d(k),h=vs(z,l);for(let e=0;e<h.length;++e){const o={};if(await S.onBatchBegin(e,o),i.tidy((()=>{const c=h[e][0],g=h[e][1],f=st(a,c,g-c);o.batch=e,o.size=g-c;const m=Ss(n,f),y=s(m);for(let t=0;t<r.length;++t){const e=r[t],s=y[t];o[e]=s,i.keep(s)}if(e===h.length-1&&w){const e=t.testLoop(p,d,l);for(let t=0;t<r.length;++t){const s=r[t],n=e[t];i.keep(n),u["val_"+s]=n}}})),await S.onBatchEnd(e,o),ve(o),t.stopTraining_)break}a.dispose()}if(await S.onEpochEnd(a,u),t.stopTraining_)break}return await S.onTrainEnd(),await t.history.syncData(),t.history}(t,v,S,N,y,r.epochs,r.verbose,x,C,z,r.shuffle,A,r.initialEpoch,null,null);return I}finally{t.isTraining=!1,As(l,s),As(u,n),As(h,s),As(c,n),As(g,p),As(f,d),null!=m&&i.dispose(m)}}function Cs(t){const s=[];t instanceof e.Tensor&&(t=[t]);for(let e=0;e<t.length;++e){const i=t[e];if(1===i.rank)s.push(et(i,1));else{if(0===i.rank)throw new Error("Expected tensor to be at least 1D, but received a 0D tensor (scalar).");s.push(i)}}return s}function As(t,s){if(null==t)return;const i=[];if(s instanceof e.Tensor)i.push(s.id);else if(Array.isArray(s))s.forEach((t=>i.push(t.id)));else if(null!=s)for(const t in s){const e=s[t];i.push(e.id)}const n=[];if(t instanceof e.Tensor)-1===i.indexOf(t.id)&&n.push(t);else if(Array.isArray(t))t.forEach((t=>{-1===i.indexOf(t.id)&&n.push(t)}));else if(null!=t)for(const e in t){const s=t[e];-1===i.indexOf(s.id)&&n.push(s)}n.forEach((t=>{t.isDisposed||t.dispose()}))}function xs(t){return Array.isArray(t)}function Is(t){return!function(t){return t instanceof e.Tensor}(t)&&!xs(t)}function Ls(t,e,s,i=!0,n=""){if(null==e||0===e.length){if(null!=t){let e=!1;if(xs(t)&&t.length>0)e=!0;else if(Is(t)){for(const s in t)if(t.hasOwnProperty(s)){e=!0;break}}else e=!0;if(e)throw new a(`Error when checking model ${n}: expected no data, but got ${t}`)}return[]}if(null==t)return e.map((t=>null));let r;if(Is(t)){r=[];for(const s of e){if(null==t[s])throw new a(`No data provided for 
"${s}". Need data for each key in: ${e}`);r.push(t[s])}}else if(xs(t)){if(t.length!==e.length)throw new a(`Error when checking model ${n}: the Array of Tensors that you are passing to your model is not the size the model expected. Expected to see ${e.length} Tensor(s), but instead got the following list of Tensor(s): ${t}`);r=t}else{if(e.length>1)throw new a(`The model ${n} expects ${e.length} Tensor(s), but only received one Tensor. Found: Tensor with shape ${t.shape}`);r=[t]}if(r=Cs(r),null!=s)for(let t=0;t<e.length;++t){if(null==s[t])continue;const o=r[t];if(o.shape.length!==s[t].length)throw new a(`Error when checking ${n}: expected ${e[t]} to have ${s[t].length} dimension(s). but got array with shape ${o.shape}`);for(let e=0;e<s[t].length;++e){if(0===e&&!i)continue;const r=o.shape[e],l=s[t][e];if(null!=l&&l>=0&&r!==l)throw new a(`${n} expected a batch of elements where each example has shape [${s[t].slice(1,s[t].length)}] (i.e.,tensor shape [*,${s[t].slice(1,s[t].length)}]) but the ${n} received an input with ${o.shape[0]} examples, each with shape [${o.shape.slice(1,o.shape.length)}] (tensor shape [${o.shape}])`)}}return r}function Ts(t,e,s,i=!0,n=""){let r;if(Array.isArray(t)){if(t.length!==e.length)throw new a(`Error when checking model ${n}: the Array of Tensors that you are passing to your model is not the size the the model expected. Expected to see ${e.length} Tensor(s), but instead got ${t.length} Tensors(s).`);r=t}else{if(e.length>1)throw new a(`The model expects ${e.length} ${n} Tensors, but only received one Tensor. Found: array with shape ${JSON.stringify(t.shape)}.`);r=[t]}if(null!=s)for(let t=0;t<e.length;++t){if(null==s[t])continue;const o=r[t];if(o.shape.length!==s[t].length)throw new a(`Error when checking ${n}: expected ${e[t]} to have ${s[t].length} dimension(s), but got array with shape ${JSON.stringify(o.shape)}`);for(let r=0;r<s[t].length;++r){if(0===r&&!i)continue;const l=o.shape[r],u=s[t][r];if(null!=u&&u!==l)throw new a(`Error when checking ${n}: expected ${e[t]} to have shape ${JSON.stringify(s[t])} but got array with shape ${JSON.stringify(o.shape)}.`)}}}class Es extends ps{constructor(t){super(t),this.isTraining=!1}summary(t,e,s=console.log){if(!this.built)throw new a("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. Build the model first (e.g., by calling it on some test data).");ns(this,t,e,s)}compile(t){if(null==t.loss&&(t.loss=[]),this.loss=t.loss,"string"==typeof t.optimizer)this.optimizer_=function(t){const s={Adagrad:()=>e.train.adagrad(.01),Adadelta:()=>e.train.adadelta(1,.95,Q()),Adam:()=>e.train.adam(.001,.9,.999,Q()),Adamax:()=>e.train.adamax(.002,.9,.999,Q(),0),RMSProp:()=>e.train.rmsprop(.001,.9,0,Q()),SGD:()=>e.train.sgd(.01)};if(s.adagrad=s.Adagrad,s.adadelta=s.Adadelta,s.adam=s.Adam,s.adamax=s.Adamax,s.rmsprop=s.RMSProp,s.sgd=s.SGD,t in s)return s[t]();throw new a(`Unknown Optimizer ${t}`)}(t.optimizer),this.isOptimizerOwned=!0;else{if(!(t.optimizer instanceof e.Optimizer))throw new a("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=t.optimizer,this.isOptimizerOwned=!1}let s=[];if(Array.isArray(t.loss)||"string"==typeof t.loss||"function"==typeof t.loss)if(Array.isArray(t.loss)){if(t.loss.length!==this.outputs.length)throw new a(`When passing an Array as loss, it should have one entry per model output. 
The model has ${this.outputs.length} output(s), but you passed loss=${t.loss}.`);const e=t.loss;s=e.map((t=>Pe(t)))}else{const e=Pe(t.loss);this.outputs.forEach((t=>{s.push(e)}))}else{t.loss=t.loss;for(const e in t.loss)if(-1===this.outputNames.indexOf(e))throw new a(`Unknown entry in loss dictionary: "${e}". Only expected the following keys: ${this.outputNames}`);for(const e of this.outputNames)null==t.loss[e]&&console.warn(`Output "${e}" is missing from loss dictionary. We assume this was done on purpose, and we will not be expecting data to be passed to ${e} during training.`),s.push(Pe(t.loss[e]))}this.lossFunctions=s,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let t=0;t<this.outputs.length;++t){const e=this.internalOutputShapes[t],s=this.outputNames[t];this.feedOutputNames.push(s),this.feedOutputShapes.push(e),this.feedLossFns.push(this.lossFunctions[t])}const i=[];this.metrics=t.metrics,this.metricsNames=["loss"],this.metricsTensors=[],j("loss",(()=>{for(let t=0;t<this.outputs.length;++t){if(-1!==i.indexOf(t))continue;const e=this.lossFunctions[t];this.outputs.length>1&&(this.metricsTensors.push([e,t]),this.metricsNames.push(this.outputNames[t]+"_loss"))}}));const n=function(t,e){if(null==t||Array.isArray(t)&&0===t.length)return e.map((t=>[]));let s;if("string"==typeof t||"function"==typeof t)s=[t];else{if(!Array.isArray(t)&&"object"!=typeof t)throw new TypeError(`Type of metrics argument not understood. Expected a string, function, Array, or Object, found: ${t}`);s=t}if(Array.isArray(s))return e.map((t=>s));{const t=[];for(const i of e){let e=s.hasOwnProperty(i)?s[i]:[];Array.isArray(e)||(e=[e]),t.push(e)}return t}}(t.metrics,this.outputNames),r=(t,e,s)=>{this.outputNames.length>1&&(e=this.outputNames[t]+"_"+e),this.metricsNames.push(e),this.metricsTensors.push([s,t])};j("metric",(()=>{for(let t=0;t<this.outputs.length;++t){if(-1!==i.indexOf(t))continue;(e=>{let s,i,n;for(const a of e){if("string"==typeof a&&-1!==["accuracy","acc","crossentropy","ce"].indexOf(a)){const e=this.internalOutputShapes[t];let r;1===e[e.length-1]||this.lossFunctions[t]===Be?-1!==["accuracy","acc"].indexOf(a)?i=je:-1!==["crossentropy","ce"].indexOf(a)&&(i=Ze):this.lossFunctions[t]===Me?-1!==["accuracy","acc"].indexOf(a)?i=Ke:-1!==["crossentropy","ce"].indexOf(a)&&(i=Ye):-1!==["accuracy","acc"].indexOf(a)?i=Ve:-1!==["crossentropy","ce"].indexOf(a)&&(i=Ge),-1!==["accuracy","acc"].indexOf(a)?r="acc":-1!==["crossentropy","ce"].indexOf(a)&&(r="ce"),n=i,s=""+r}else{const t=Qe(a);n=t,s=""+ts(a)}let e;j(s,(()=>{e=n})),r(t,s,e)}})(n[t])}})),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){null!=this.collectedTrainableWeights&&this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainable weights and collected trainable weights. 
Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(t,e,s={}){const i=null==s.batchSize?32:s.batchSize;zs(i);const n=this.standardizeUserDataXY(t,e,!0,i);try{const r=n[0].concat(n[1]);this.makeTestFunction();const a=this.testFunction;return d(this.testLoop(a,r,i,s.verbose,s.steps))}finally{As(n[0],t),As(n[1],e)}}async evaluateDataset(t,s){return this.makeTestFunction(),async function(t,s,n){const r=null!=(n=n||{}).batches,a=t.testFunction;let l=[];if(n.verbose>0)throw new o("Verbose mode is not implemented yet.");i.util.assert(!r||n.batches>0&&Number.isInteger(n.batches),(()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(n.batches)}`));const u="function"==typeof s.next?s:await s.iterator();let h=0,c=0;for(;!r||c<n.batches;){const s=await u.next();if(l=i.tidy((()=>{if(s.value){const{xs:n,ys:r}=ms(t,s.value),o=n.concat(r),u=i.tidy((()=>a(o)));if(i.dispose(o),0===c)for(let t=0;t<u.length;++t)l.push(e.scalar(0));const p=o[0].shape[0];for(let t=0;t<u.length;++t){const e=u[t],s=l[t];l[t]=i.tidy((()=>i.add(l[t],i.mul(p,e)))),c>0&&i.dispose(s)}i.dispose(u),h+=p,++c}return l})),s.done){r&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evaluation. Make sure that your dataset can generate at least \`batches\` batches (in this case, ${n.batches} batches). You may need to use the repeat() function when building your dataset.`);break}}for(let t=0;t<l.length;++t){const e=l[t];l[t]=i.div(l[t],h),i.dispose(e)}return d(l)}(this,t,s)}checkNumSamples(t,e,s,i="steps"){let n;if(null!=s){if(n=null,null!=e)throw new a(`If ${i} is set, batchSize must be null or undefined. Got batchSize = ${e}`)}else{if(null==t)throw new a(`Either the input data should have a defined shape, or ${i} should be specified.`);n=Array.isArray(t)?t[0].shape[0]:t.shape[0]}return n}execute(t,s){if(Array.isArray(s)&&0===s.length)throw new a("`outputs` is an empty Array, which is not allowed.");const i=Array.isArray(s),n=i?s:[s],r=this.retrieveSymbolicTensors(n),o=new se;if(t instanceof e.Tensor&&(t=[t]),Array.isArray(t)){if(t.length!==this.inputs.length)throw new a(`The number of inputs provided (${t.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let e=0;e<this.inputs.length;++e)o.add(this.inputs[e],t[e])}else for(const e of this.inputs){const s=t[e.name];if(null==s)throw new a(`No value is provided for the model's input ${e.name}`);o.add(e,s)}const l=re(r,o);return i?l:l[0]}retrieveSymbolicTensors(t){const e=h(null,t.length);let s=t.length;for(const i of this.layers){const n=Array.isArray(i.output)?i.output:[i.output],r=n.map((t=>t.name));for(let i=0;i<t.length;++i){const a=r.indexOf(t[i]);if(-1!==a&&(e[i]=n[a],s--),0===s)break}if(0===s)break}if(s>0){const s=[];throw e.forEach(((e,i)=>{null==e&&s.push(t[i])})),new a(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(s)}`)}return e}predictLoop(t,e=32,s=!1){return i.tidy((()=>{const n=this.checkNumSamples(t);if(s)throw new o("Verbose predictLoop() is not implemented yet.");const r=vs(n,e),a=this.outputs.map((t=>[]));for(let e=0;e<r.length;++e){i.tidy((()=>{const s=r[e][0],i=r[e][1],n=ks(t,s,i),a=[];if(Array.isArray(n))for(let t=0;t<n.length;++t)a.push({key:this.inputs[t],value:n[t]});else a.push({key:this.inputs[0],value:n});const o=new se(a);return re(this.outputs,o)})).forEach(((t,e)=>a[e].push(t)))}return d(a.map((t=>i.concat(t,0))))}))}predict(t,e={}){const 
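/* Editor's note (illustrative sketch, not part of the bundle): predictLoop above splits
   the input into batches of `batchSize`, feeds each batch through the graph, and
   concatenates the per-batch outputs along axis 0 (identifiers hypothetical):

     const x = tf.randomNormal([100, 10]);
     const y = model.predict(x, {batchSize: 32});  // runs 4 internal batches
     y.print();
     x.dispose(); y.dispose();
*/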
s=Cs(t);Ts(s,this.inputNames,this.feedInputShapes,!1);try{const i=null==e.batchSize?32:e.batchSize;return zs(i),this.predictLoop(s,i)}finally{As(s,t)}}predictOnBatch(t){Ts(t,this.inputNames,this.feedInputShapes,!0);const e=(Array.isArray(t)?t[0]:t).shape[0];return this.predictLoop(t,e)}standardizeUserDataXY(t,s,i=!0,n){if(null==this.optimizer_)throw new r("You must compile a model before training/testing. Use LayersModel.compile(modelCompileArgs).");const o=[];for(let t=0;t<this.feedOutputShapes.length;++t){const e=this.feedOutputShapes[t];this.feedLossFns[t]===Me?o.push(e.slice(0,e.length-1).concat([1])):o.push(e)}if(function(t,s,i){const n=S(t.map((t=>t.shape[0])));n.sort();const r=S(s.map((t=>t.shape[0])));if(r.sort(),n.length>1)throw new a(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map((t=>t.shape)))}`);if(r.length>1)throw new a(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(s.map((t=>t.shape)))}`);if(n.length>0&&r.length>0&&!e.util.arraysEqual(n,r))throw new a(`Input Tensors should have the same number of samples as target Tensors. Found ${n[0]} input sample(s) and ${r[0]} target sample(s).`)}(t=Ls(t,this.feedInputNames,this.feedInputShapes,!1,"input"),s=Ls(s,this.feedOutputNames,o,!1,"target")),function(t,e,s){const i=[Fe,Be,_e];for(let n=0;n<t.length;++n){const r=t[n],o=e[n],l=s[n];if(null!=o){if(o===_e&&1===r.shape[r.shape.length-1])throw new a(`You are passing a target array of shape ${r.shape} while using a loss 'categorical_crossentropy'. 'categorical_crossentropy' expects targets to be binary matrices (1s and 0s) of shape [samples, classes].`);if(-1!==i.indexOf(o)){const t=r.shape.slice(1),e=l.slice(1);for(let s=0;s<t.length;++s){const i=t[s],n=e[s];if(null!=n&&i!==n)throw new a(`A target Tensor with shape ${r.shape} was passed for an output of shape ${l}, while using a loss function that expects targets to have the same shape as the output.`)}}}}}(s,this.feedLossFns,this.feedOutputShapes),this.stateful&&null!=n&&n>0&&t[0].shape[0]%n!=0)throw new a(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${n}. 
Found: ${t[0].shape[0]} sample(s).`);return[t,s]}async standardizeUserData(t,e,s,i,n=!0,r){const[a,o]=this.standardizeUserDataXY(t,e,n,r);if(null!=s)throw new Error("sample weight is not supported yet.");let l=null;if(null!=i){const t=ds(i,this.outputNames);l=[];for(let e=0;e<t.length;++e)l.push(await gs(o[e],null,t[e]))}return[a,o,l]}testLoop(t,s,n,r=0,a){return i.tidy((()=>{const l=this.checkNumSamples(s,n,a,"steps"),u=[];if(r>0)throw new o("Verbose mode is not implemented yet.");if(null!=a)throw new o("steps mode in testLoop() is not implemented yet");{const r=vs(l,n),a=e.tensor1d(Y(0,l));for(let n=0;n<r.length;++n){const o=r[n][0],l=r[n][1],h=st(a,o,l-o),c=Ss(s,h),p=t(c);if(0===n)for(let t=0;t<p.length;++t)u.push(e.scalar(0));for(let t=0;t<p.length;++t){const e=p[t];u[t]=i.add(u[t],i.mul(l-o,e))}}for(let t=0;t<u.length;++t)u[t]=i.div(u[t],l)}return u}))}getDedupedMetricsNames(){const t=this.metricsNames,e=[];for(let s=0;s<t.length;++s){const i=t[s];let n=i;if(p(t,i)>1){n+=`_${p(t.slice(0,s),i)}`}e.push(n)}return e}makeTrainFunction(){return t=>{const e=[],s=t.slice(0,this.inputs.length),n=t.slice(this.inputs.length,this.inputs.length+this.outputs.length),r=t.slice(this.inputs.length+this.outputs.length,this.inputs.length+2*this.outputs.length),a=[],o=this.collectedTrainableWeights.map((t=>t.read()));return[this.optimizer_.minimize((()=>{const t=[];for(let e=0;e<this.inputs.length;++e)t.push({key:this.inputs[e],value:s[e]});const o=new se(t),l=re(this.outputs,o,{training:!0});let u;for(let t=0;t<this.lossFunctions.length;++t){let s=(0,this.lossFunctions[t])(n[t],l[t]);null!=r[t]&&(s=fs(s,r[t]));const a=i.mean(s);e.push(a),u=0===t?s:i.add(u,s)}for(let t=0;t<this.metricsTensors.length;++t){let s;if(this.outputs.length>1&&t<this.outputs.length)s=e[t];else{const e=this.metricsTensors[t][0],r=this.metricsTensors[t][1];s=i.mean(e(n[r],l[r]))}i.keep(s),a.push(s)}return u=i.mean(u),this.calculateLosses().forEach((t=>{u=i.add(u,t)})),u}),!0,o)].concat(a)}}makeTestFunction(){this.testFunction=t=>i.tidy((()=>{const e=[];let s;const n=t.slice(0,this.inputs.length),r=t.slice(this.inputs.length,this.inputs.length+this.outputs.length),a=[];for(let t=0;t<this.inputs.length;++t)a.push({key:this.inputs[t],value:n[t]});const o=new se(a),l=re(this.outputs,o);for(let t=0;t<this.lossFunctions.length;++t){const n=this.lossFunctions[t],a=i.mean(n(r[t],l[t]));s=0===t?a:i.add(s,a),e.push(s)}for(let t=0;t<this.metricsTensors.length;++t){const s=this.metricsTensors[t][0],n=this.metricsTensors[t][1],a=i.mean(s(r[n],l[n]));e.push(a)}return e}))}async fit(t,e,s={}){return Ns(this,t,e,s)}async fitDataset(t,e){return bs(this,t,e)}async trainOnBatch(t,e){const s=await this.standardizeUserData(t,e),n=s[0],r=s[1],a=this.makeTrainFunction()(n.concat(r)),o=[];for(const t of a){const e=await t.data();o.push(e[0])}return i.dispose(a),As(s[0],t),As(s[1],e),d(o)}getNamedWeights(t){const e=[],s=null!=t&&t.trainableOnly,i=s?this.trainableWeights:this.weights,n=this.getWeights(s);for(let t=0;t<i.length;++t)s&&!i[t].trainable||e.push({name:i[t].originalName,tensor:n[t]});return e}set stopTraining(t){this.stopTraining_=t}get stopTraining(){return this.stopTraining_}get optimizer(){return this.optimizer_}set optimizer(t){this.optimizer_!==t&&(this.optimizer_=t,this.isOptimizerOwned=!1)}dispose(){const t=super.dispose();if(0===t.refCountAfterDispose&&null!=this.optimizer&&this.isOptimizerOwned){const e=i.memory().numTensors;this.optimizer_.dispose(),t.numDisposedVariables+=e-i.memory().numTensors}return t}getLossIdentifiers(){let 
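/* Editor's note (illustrative sketch, not part of the bundle): makeTrainFunction() above
   builds a closure that runs one optimizer.minimize() step over the concatenated
   [inputs, targets, classWeights] tensors and returns the loss plus metric scalars;
   trainOnBatch() wraps it for a single batch (identifiers hypothetical):

     const xs = tf.randomNormal([8, 10]);
     const ys = tf.randomUniform([8, 1]);
     const loss = await model.trainOnBatch(xs, ys);  // number, or number[] when metrics are configured
*/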
t;if("string"==typeof this.loss)t=f(this.loss);else if(Array.isArray(this.loss)){for(const t of this.loss)if("string"!=typeof t)throw new Error("Serialization of non-string loss is not supported.");t=this.loss.map((t=>f(t)))}else{const e=Object.keys(this.loss);t={};const s=this.loss;for(const i of e){if("string"!=typeof s[i])throw new Error("Serialization of non-string loss is not supported.");t[i]=f(s[i])}}return t}getMetricIdentifiers(){if("string"==typeof this.metrics||"function"==typeof this.metrics)return[f(ts(this.metrics))];if(Array.isArray(this.metrics))return this.metrics.map((t=>f(ts(t))));{const t={};for(const e in this.metrics)t[e]=f(ts(this.metrics[e]));return t}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(t){if(null!=t.weighted_metrics)throw new Error("Loading weight_metrics is not supported yet.");if(null!=t.loss_weights)throw new Error("Loading loss_weights is not supported yet.");if(null!=t.sample_weight_mode)throw new Error("Loading sample_weight_mode is not supported yet.");const e=De(us(t.optimizer_config));let s,i;if("string"==typeof t.loss)s=m(t.loss);else if(Array.isArray(t.loss))s=t.loss.map((t=>m(t)));else if(null!=t.loss){s={};for(const e in t.loss)s[e]=m(t.loss[e])}if(Array.isArray(t.metrics))i=t.metrics.map((t=>m(t)));else if(null!=t.metrics){i={};for(const e in t.metrics)i[e]=m(t.metrics[e])}this.compile({loss:s,metrics:i,optimizer:e})}async save(t,s){if("string"==typeof t){const s=e.io.getSaveHandlers(t);if(0===s.length)throw new a(`Cannot find any save handlers for URL '${t}'`);if(s.length>1)throw new a(`Found more than one (${s.length}) save handlers for URL '${t}'`);t=s[0]}if(null==t.save)throw new a("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");const i=await e.io.encodeWeights(this.getNamedWeights(s)),n={modelTopology:this.toJSON(null,!1),format:"layers-model",generatedBy:"TensorFlow.js tfjs-layers v3.20.0",convertedBy:null};if(null!=s&&s.includeOptimizer&&null!=this.optimizer){n.trainingConfig=this.getTrainingConfig();const t="optimizer",{data:s,specs:r}=await e.io.encodeWeights(await this.optimizer.getWeights(),t);i.specs.push(...r),i.data=e.io.concatenateArrayBuffers([i.data,s])}if(null!=this.userDefinedMetadata){const t=!0;ss(this.userDefinedMetadata,this.name,t),n.userDefinedMetadata=this.userDefinedMetadata}return n.weightData=i.data,n.weightSpecs=i.specs,t.save(n)}setUserDefinedMetadata(t){ss(t,this.name),this.userDefinedMetadata=t}getUserDefinedMetadata(){return this.userDefinedMetadata}}Es.className="Model",e.serialization.registerClass(Es);class Ds extends Es{}async function $s(t,s){if(null==s&&(s={}),"string"==typeof t){const i=e.io.getLoadHandlers(t,s);if(0===i.length)i.push(e.io.browserHTTPRequest(t,s));else if(i.length>1)throw new a(`Found more than one (${i.length}) load handlers for URL '${t}'`);t=i[0]}return async function(t,s,i){null==i&&(i={});if(null==t.load)throw new a("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const n=await t.load();let r=n.modelTopology;null!=r.model_config&&(r=r.model_config);const 
o=null==i.strict||i.strict,l=null!=n.weightData&&null!=n.weightSpecs&&o,u=De(us(r),s,l),h=n.trainingConfig;null!=h&&u.loadTrainingConfig(h);null!=n.userDefinedMetadata&&u.setUserDefinedMetadata(n.userDefinedMetadata);if(null!=n.weightData){if(null==n.weightSpecs)throw new a("LayersModel artifacts contains weight data, but not weight specs. Therefore loading of weights cannot proceed.");const{modelWeights:t,optimizerWeights:s}=function(t,s){const i=e.io.decodeWeights(t,s),n={},r=[];return s.forEach((t=>{"optimizer"===t.group?r.push({name:t.name,tensor:i[t.name]}):n[t.name]=i[t.name]})),{modelWeights:n,optimizerWeights:r}}(n.weightData,n.weightSpecs);u.loadWeights(t,o),null!=u.optimizer&&s.length>0&&await u.optimizer.setWeights(s),e.dispose(t),e.dispose(s.map((t=>t.tensor)))}return u}(t,void 0,s)}Ds.className="Functional",e.serialization.registerClass(Ds);class Fs extends Es{constructor(t){if(super({inputs:[],outputs:[]}),t=t||{},this.trainable=!0,this.built=!1,this.name=null!=t.name?t.name:D("sequential_"),null!=t.layers)for(const e of t.layers)this.add(e)}checkShape(t){if(t.inboundNodes[0].outputTensors[0].shape.some((t=>t<0)))throw new a(`Negative dimension size caused by adding layer ${t.name} with input shape [${t.inboundNodes[0].inputTensors[0].shape}]`)}add(t){const e=t instanceof Fs||t instanceof Es;let s;if(e){if(s=t,1!==s.outputs.length)throw new a("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(1!==s.inputs.length)throw new a("All layers in a Sequential model should have a single input tensor. For multi-input layers, use the functional API.")}if(0===this.outputs.length){if(0===t.inboundNodes.length){if(null==t.batchInputShape)throw new a("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");const e=ee({batchShape:t.batchInputShape,dtype:t.dtype,name:t.name+"_input"});t.apply(e)}if(e)this.outputs=s.outputs,this.inputs=s.inputs;else{if(1!==t.inboundNodes.length)throw new a(`A layer added to a Sequential model must not already be connected somewhere else. LayersModel received layer ${t.name} which has ${t.inboundNodes.length} pre-existing inbound connections.`);if(1!==t.inboundNodes[0].outputTensors.length)throw new a("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(t),this.outputs=[t.inboundNodes[0].outputTensors[0]],this.inputs=Qt(this.outputs[0])}this.inboundNodes=[],new Gt({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:h(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map((t=>t.shape)),outputShapes:this.outputs[0].shape})}else{const e=t.apply(this.outputs[0]);if(Array.isArray(e))throw new TypeError("All layers in a Sequential model should have a single output tensor. 
For multi-output layers, use the functional API.");this.checkShape(t),this.outputs=[e],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(t),this.built=!1}pop(){if(0===this.layers.length)throw new TypeError("There are no layers in the model.");if(this.layers.pop(),0===this.layers.length)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{const t=this.layers.length-1;this.layers[t].outboundNodes=[],this.outputs=[this.layers[t].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(t,e){return null==this.model&&this.build(),this.model.call(t,e)}build(t){if(Ut(t),0===this.inputs.length||0===this.outputs.length)throw new TypeError("Sequential model cannot be built: model is empty. Add some layers first.");this.model=new Es({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(t,e,s=console.log){this.built||this.build(),super.summary(t,e,s)}setWeights(t){null==this.model&&this.build(),this.model.setWeights(t)}evaluate(t,e,s={}){if(!this.built)throw new r("The model needs to be compiled before being used.");return this.model.evaluate(t,e,s)}async evaluateDataset(t,e){if(!this.built)throw new r("The model needs to be compiled before being used.");return this.model.evaluateDataset(t,e)}predict(t,e={}){return null==this.model&&this.build(),this.model.predict(t,e)}predictOnBatch(t){return null==this.model&&this.build(),this.model.predictOnBatch(t)}compile(t){this.build(),this.model.compile(t),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return null==this.model?void 0:this.model.optimizer}set optimizer(t){this.model.optimizer=t}async fit(t,e,s={}){if(!this.built)throw new r("The model needs to be compiled before being used.");return this.model.fit(t,e,s)}async fitDataset(t,e){if(!this.built)throw new r("The model needs to be compiled before being used.");return this.model.fitDataset(t,e)}async trainOnBatch(t,e){return this.model.trainOnBatch(t,e)}static fromConfig(t,s,i={},n=!1){let r,l={};if(s instanceof Array){if(null==s[0].className||"Merge"===s[0].className)throw new a("Legacy serialization format not supported yet.");r=s}else e.util.assert(null!=s.layers,(()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field.")),r=s.layers,delete s.layers,l=s;const u=new t(l);if(!(u instanceof Fs))throw new o(`Sequential.fromConfig called on non-Sequential input: ${u}`);for(const t of r){const e=De(t,void 0,n);n&&e.setFastWeightInitDuringBuild(!0),u.add(e)}return u}set stopTraining(t){if(null==this.model)throw new 
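/* Editor's note (illustrative sketch, not part of the bundle): Sequential.add() above
   connects each new layer to the previous layer's single output, so the first layer
   must declare an input shape. Typical usage (identifiers hypothetical):

     const model = tf.sequential();
     model.add(tf.layers.dense({units: 16, activation: 'relu', inputShape: [8]}));
     model.add(tf.layers.dense({units: 1}));
     model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
*/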
a("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=t}get stopTraining(){if(null==this.model)throw new a("Cannot get the stopTraining property of a sequential model before it is compiled.");return this.model.stopTraining}getConfig(){const t=[];for(const e of this.layers){const s={};s.className=e.getClassName(),s.config=e.getConfig(),t.push(s)}return{name:this.name,layers:t}}}function Os(t){return ee(t)}Fs.className="Sequential",e.serialization.registerClass(Fs);class Rs extends e.serialization.Serializable{getConfig(){return{}}}class _s extends Rs{apply(t,e=1){return function(t,e=1){if(1!==e)throw new o(`Support for alpha values other than 1 (${e}) is not implemented yet.`);return i.elu(t)}(t,e)}}_s.className="elu",e.serialization.registerClass(_s);class Ms extends Rs{apply(t){return i.selu(t)}}Ms.className="selu",e.serialization.registerClass(Ms);class Bs extends Rs{apply(t){return i.relu(t)}}Bs.className="relu",e.serialization.registerClass(Bs);class Ws extends Rs{apply(t){return e.tidy((()=>i.minimum(6,i.relu(t))))}}Ws.className="relu6",e.serialization.registerClass(Ws);class Us extends Rs{apply(t){return t}}Us.className="linear",e.serialization.registerClass(Us);class Ps extends Rs{apply(t){return i.sigmoid(t)}}Ps.className="sigmoid",e.serialization.registerClass(Ps);class js extends Rs{apply(t){return function(t){return e.tidy((()=>{const e=i.add(.5,i.mul(.2,t));return i.clipByValue(e,0,1)}))}(t)}}js.className="hardSigmoid",e.serialization.registerClass(js);class Vs extends Rs{apply(t){return i.softplus(t)}}Vs.className="softplus",e.serialization.registerClass(Vs);class qs extends Rs{apply(t){return function(t){return e.tidy((()=>i.div(t,i.add(i.abs(t),1))))}(t)}}qs.className="softsign",e.serialization.registerClass(qs);class Js extends Rs{apply(t){return i.tanh(t)}}Js.className="tanh",e.serialization.registerClass(Js);class Hs extends Rs{apply(t,e=-1){return i.softmax(t,e)}}Hs.className="softmax",e.serialization.registerClass(Hs);class Zs extends Rs{apply(t,e=-1){return i.logSoftmax(t,e)}}Zs.className="logSoftmax",e.serialization.registerClass(Zs);class Ks extends Rs{apply(t,s=1){return e.tidy((()=>i.mul(i.sigmoid(i.mul(t,s)),t)))}}Ks.className="swish",e.serialization.registerClass(Ks);class Gs extends Rs{apply(t){return e.tidy((()=>i.mul(t,i.tanh(i.softplus(t)))))}}function Ys(t){return t.getClassName()}function Xs(t,s={}){return z(t,e.serialization.SerializationMap.getMap().classNameMap,s,"activation")}function Qs(t){if(null==t){const t={className:"linear",config:{}};return Xs(t)}if("string"==typeof t){const e={};return e.className=t,e.config={},Xs(e)}return t instanceof Rs?t:Xs(t)}function ti(t){if(null!=t&&"object"!=typeof t)throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${t}`)}Gs.className="mish",e.serialization.registerClass(Gs);class ei extends e.serialization.Serializable{}class si extends ei{constructor(t){super(),ti(t),this.l1=null==t||null==t.l1?.01:t.l1,this.l2=null==t||null==t.l2?.01:t.l2,this.hasL1=0!==this.l1,this.hasL2=0!==this.l2}apply(t){return e.tidy((()=>{let s=e.zeros([1]);return this.hasL1&&(s=e.add(s,e.sum(i.mul(this.l1,e.abs(t))))),this.hasL2&&(s=e.add(s,e.sum(i.mul(this.l2,ct(t))))),i.reshape(s,[])}))}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(t,e){return new t({l1:e.l1,l2:e.l2})}}si.className="L1L2",e.serialization.registerClass(si);const ii={l1l2:"L1L2"};function ni(t){return b(t)}function ri(t,s={}){return 
z(t,e.serialization.SerializationMap.getMap().classNameMap,s,"regularizer")}function ai(t){if(null==t)return null;if("string"==typeof t){return ri({className:t in ii?ii[t]:t,config:{}})}return t instanceof ei?t:ri(t)}class oi extends Xt{constructor(t){super(null==t?{}:t),this.supportsMasking=!0,null!=t&&(this.maxValue=t.maxValue)}call(t,s){t=Wt(t);let i=e.relu(t);return null!=this.maxValue&&(i=e.clipByValue(i,0,this.maxValue)),i}computeOutputShape(t){return t}getConfig(){const t={maxValue:this.maxValue},e=super.getConfig();return Object.assign(t,e),t}}oi.className="ReLU",e.serialization.registerClass(oi);class li extends Xt{constructor(t){super(null==t?{}:t),this.DEFAULT_ALPHA=.3,null==t&&(t={}),this.alpha=null==t.alpha?this.DEFAULT_ALPHA:t.alpha}call(t,s){const i=Wt(t);return e.leakyRelu(i,this.alpha)}computeOutputShape(t){return t}getConfig(){const t={alpha:this.alpha},e=super.getConfig();return Object.assign(t,e),t}}li.className="LeakyReLU",e.serialization.registerClass(li);class ui extends Xt{constructor(t){if(super(null==t?{}:t),this.DEFAULT_ALPHA_INITIALIZER="zeros",null==t&&(t={}),this.supportsMasking=!0,this.alphaInitializer=_t(t.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=ai(t.alphaRegularizer),this.alphaConstraint=be(t.alphaConstraint),null==t.sharedAxes)this.sharedAxes=null;else if(Array.isArray(t.sharedAxes))this.sharedAxes=t.sharedAxes;else{if("number"!=typeof t.sharedAxes)throw new a(`Expected sharedAxes to be a number or an array of numbers, but got ${t.sharedAxes}`);this.sharedAxes=[t.sharedAxes]}}build(t){const e=(t=Ut(t)).slice(1);if(null!=this.sharedAxes)for(const t of this.sharedAxes)e[t-1]=1;this.alpha=this.addWeight("alpha",e,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);const s={};if(null!=this.sharedAxes)for(let e=1;e<t.length;++e)s[e]=t[e];this.inputSpec=[new Ht({ndim:t.length,axes:s})],this.built=!0}call(t,s){return t=Wt(t),e.prelu(t,this.alpha.read())}getConfig(){const t={alphaInitializer:Rt(this.alphaInitializer),alphaRegularizer:ni(this.alphaRegularizer),alphaConstraint:me(this.alphaConstraint),sharedAxes:this.sharedAxes},e=super.getConfig();return Object.assign(t,e),t}}ui.className="PReLU",e.serialization.registerClass(ui);class hi extends Xt{constructor(t){if(super(null==t?{}:t),this.DEFAULT_ALPHA=1,null==t&&(t={}),null!=t.alpha&&t.alpha!==this.DEFAULT_ALPHA)throw new o(`Non-default alpha value (${t.alpha}) is not supported by the ELU layer yet.`);this.alpha=null==t.alpha?this.DEFAULT_ALPHA:t.alpha}call(t,s){const i=Wt(t);return e.elu(i)}computeOutputShape(t){return t}getConfig(){const t={alpha:this.alpha},e=super.getConfig();return Object.assign(t,e),t}}hi.className="ELU",e.serialization.registerClass(hi);class ci extends Xt{constructor(t){super(null==t?{}:t),this.DEFAULT_THETA=1,null==t&&(t={}),this.theta=null==t.theta?this.DEFAULT_THETA:t.theta}call(t,s){const i=Wt(t);return e.mul(i,e.cast(e.greater(i,this.theta),"float32"))}computeOutputShape(t){return t}getConfig(){const t={theta:this.theta},e=super.getConfig();return Object.assign(t,e),t}}ci.className="ThresholdedReLU",e.serialization.registerClass(ci);class pi extends Xt{constructor(t){super(null==t?{}:t),this.DEFAULT_AXIS=1,null==t&&(t={}),this.softmax=(new Hs).apply,this.axis=null==t.axis?this.DEFAULT_AXIS:t.axis}call(t,e){const s=Wt(t);return this.softmax(s,this.axis)}computeOutputShape(t){return t}getConfig(){const t={axis:this.axis},e=super.getConfig();return Object.assign(t,e),t}}function di(t,e,s){if("number"==typeof t)return 
h(t,e);if(t.length!==e)throw new a(`The ${s} argument must be an integer or tuple of ${e} integers. Received: ${t.length} elements.`);for(let n=0;n<e;++n){const r=t[n];if((i=r)!==parseInt(i.toString(),10))throw new a(`The ${s} argument must be an integer or tuple of ${e} integers. Received: ${JSON.stringify(t)} including a non-integer number ${r}`)}return t;var i}function gi(t,e,s,i,n=1){if(null==t)return t;let r;return r="same"===s?t:t-(e+(e-1)*(n-1))+1,Math.floor((r+i-1)/i)}function fi(t,e,s,i){if(null==t)return null;if("valid"===i)t=t*e+G([s-e,0]);else{if("same"!==i)throw new a(`Unsupported padding mode: ${i}.`);t*=e}return t}function mi(t,s){return e.tidy((()=>(B(s),"channelsFirst"===s?i.transpose(t,[0,2,3,1]):t)))}function yi(t,s){return e.tidy((()=>(B(s),"channelsFirst"===s?i.transpose(t,[0,2,3,4,1]):t)))}function bi(t,s,n,r=[1,1],l="valid",u,h,c=null){return e.tidy((()=>{if(null==u&&(u="channelsLast"),B(u),3!==t.rank&&4!==t.rank)throw new a(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${t.rank}.`);if(3!==s.rank&&4!==s.rank)throw new a(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${s.rank}.`);let e=mi(t,u);if("causal"===l)throw new o("The support for CAUSAL padding mode in conv2dWithBiasActivation is not implemented yet.");return e=i.fused.conv2d({x:e,filter:s,strides:r,pad:"same"===l?"same":"valid",dilations:h,dataFormat:"NHWC",bias:n,activation:c}),"channelsFirst"===u&&(e=i.transpose(e,[0,3,1,2])),e}))}pi.className="Softmax",e.serialization.registerClass(pi);class wi extends Xt{constructor(t,e){if(super(e),this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",wi.verifyArgs(e),this.rank=t,A(this.rank,"rank"),1!==this.rank&&2!==this.rank&&3!==this.rank)throw new o(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=di(e.kernelSize,t,"kernelSize"),this.strides=di(null==e.strides?1:e.strides,t,"strides"),this.padding=null==e.padding?"valid":e.padding,W(this.padding),this.dataFormat=null==e.dataFormat?"channelsLast":e.dataFormat,B(this.dataFormat),this.activation=Qs(e.activation),this.useBias=null==e.useBias||e.useBias,this.biasInitializer=_t(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=be(e.biasConstraint),this.biasRegularizer=ai(e.biasRegularizer),this.activityRegularizer=ai(e.activityRegularizer),this.dilationRate=di(null==e.dilationRate?1:e.dilationRate,t,"dilationRate"),1===this.rank&&Array.isArray(this.dilationRate)&&1!==this.dilationRate.length)throw new a(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(2===this.rank){if("number"==typeof this.dilationRate)this.dilationRate=[this.dilationRate,this.dilationRate];else if(2!==this.dilationRate.length)throw new a(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(3===this.rank)if("number"==typeof this.dilationRate)this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(3!==this.dilationRate.length)throw new a(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}static verifyArgs(t){if(c("kernelSize"in t,"required key 'kernelSize' not in config"),"number"!=typeof t.kernelSize&&!C(t.kernelSize,"number",1,3))throw new a(`BaseConv expects config.kernelSize to be number or number[] with length 
1, 2, or 3, but received ${JSON.stringify(t.kernelSize)}.`)}getConfig(){const t={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:Ys(this.activation),useBias:this.useBias,biasInitializer:Rt(this.biasInitializer),biasRegularizer:ni(this.biasRegularizer),activityRegularizer:ni(this.activityRegularizer),biasConstraint:me(this.biasConstraint)},e=super.getConfig();return Object.assign(t,e),t}}class zi extends wi{constructor(t,e){super(t,e),this.kernel=null,zi.verifyArgs(e),this.filters=e.filters,A(this.filters,"filters"),this.kernelInitializer=_t(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=be(e.kernelConstraint),this.kernelRegularizer=ai(e.kernelRegularizer)}build(t){t=Ut(t);const e="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[e])throw new a(`The channel dimension of the input should be defined. Found ${t[e]}`);const s=t[e],i=this.kernelSize.concat([s,this.filters]);this.kernel=this.addWeight("kernel",i,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[e]:s}}],this.built=!0}call(t,s){return e.tidy((()=>{let s;t=Wt(t);const n=null==this.bias?null:this.bias.read(),r=I(this.activation.getClassName());if(null!=r&&2===this.rank)s=bi(t,this.kernel.read(),n,this.strides,this.padding,this.dataFormat,this.dilationRate,r);else{if(1===this.rank)s=function(t,s,n,r=1,l="valid",u,h=1){return e.tidy((()=>{if(null==u&&(u="channelsLast"),B(u),3!==t.shape.length)throw new a(`The input of a conv1dWithBias operation should be of rank 3, but is ${t.shape.length} instead.`);if(3!==s.shape.length)throw new a(`The kernel for a conv1dWithBias operation should be of rank 3, but is ${s.shape.length} instead.`);if(null!=n&&1!==n.shape.length)throw new a(`The bias for a conv1dWithBias operation should be of rank 1, but is ${n.shape.length} instead.`);if("channelsFirst"===u&&(t=i.transpose(t,[0,2,1])),"causal"===l)throw new o("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let e=i.conv1d(t,s,r,"same"===l?"same":"valid","NWC",h);return null!=n&&(e=dt(e,n)),e}))}(t,this.kernel.read(),n,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(2===this.rank)s=bi(t,this.kernel.read(),n,this.strides,this.padding,this.dataFormat,this.dilationRate);else{if(3!==this.rank)throw new o("convolutions greater than 3D are not implemented yet.");s=function(t,s,n,r=[1,1,1],l="valid",u,h){return e.tidy((()=>{if(null==u&&(u="channelsLast"),B(u),4!==t.rank&&5!==t.rank)throw new a(`conv3dWithBias expects input to be of rank 4 or 5, but received ${t.rank}.`);if(4!==s.rank&&5!==s.rank)throw new a(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ${s.rank}.`);let e=yi(t,u);if("causal"===l)throw new o("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return e=i.conv3d(e,s,r,"same"===l?"same":"valid","NDHWC",h),null!=n&&(e=dt(e,n)),"channelsFirst"===u&&(e=i.transpose(e,[0,4,1,2,3])),e}))}(t,this.kernel.read(),n,this.strides,this.padding,this.dataFormat,this.dilationRate)}null!=this.activation&&(s=this.activation.apply(s))}return s}))}computeOutputShape(t){t=Ut(t);const e=[],s="channelsLast"===this.dataFormat?t.slice(1,t.length-1):t.slice(2);for(let t=0;t<s.length;++t){const i=gi(s[t],this.kernelSize[t],this.padding,this.strides[t],"number"==typeof 
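/* Editor's note (illustrative sketch, not part of the bundle): the call to gi() here
   computes the standard convolution output length. With
   dilatedKernel = kernelSize + (kernelSize - 1) * (dilationRate - 1),
   'valid' padding yields ceil((inLen - dilatedKernel + 1) / stride) and 'same' padding
   yields ceil(inLen / stride). For example, inLen=28, kernelSize=3, stride=1,
   dilationRate=1 gives 26 under 'valid' and 28 under 'same'. */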
this.dilationRate?this.dilationRate:this.dilationRate[t]);e.push(i)}let i=[t[0]];return"channelsLast"===this.dataFormat?(i=i.concat(e),i.push(this.filters)):(i.push(this.filters),i=i.concat(e)),i}getConfig(){const t={filters:this.filters,kernelInitializer:Rt(this.kernelInitializer),kernelRegularizer:ni(this.kernelRegularizer),kernelConstraint:me(this.kernelConstraint)},e=super.getConfig();return Object.assign(t,e),t}static verifyArgs(t){if(!("filters"in t)||"number"!=typeof t.filters||t.filters<1)throw new a(`Convolution layer expected config.filters to be a 'number' > 0 but got ${JSON.stringify(t.filters)}`)}}class ki extends zi{constructor(t){super(2,t),ki.verifyArgs(t)}getConfig(){const t=super.getConfig();return delete t.rank,t}static verifyArgs(t){if("number"!=typeof t.kernelSize&&!C(t.kernelSize,"number",1,2))throw new a(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(t.kernelSize)}.`)}}ki.className="Conv2D",e.serialization.registerClass(ki);class Si extends zi{constructor(t){super(3,t),Si.verifyArgs(t)}getConfig(){const t=super.getConfig();return delete t.rank,t}static verifyArgs(t){if("number"!=typeof t.kernelSize&&(!Array.isArray(t.kernelSize)||1!==t.kernelSize.length&&3!==t.kernelSize.length))throw new a(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(t.kernelSize)}.`)}}Si.className="Conv3D",e.serialization.registerClass(Si);class vi extends ki{constructor(t){if(super(t),this.inputSpec=[new Ht({ndim:4})],"same"!==this.padding&&"valid"!==this.padding)throw new a(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(t){if(4!==(t=Ut(t)).length)throw new a("Input should have rank 4; Received input shape: "+JSON.stringify(t));const e="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[e])throw new a("The channel dimension of the inputs should be defined. 
Found `None`.");const s=t[e],i=this.kernelSize.concat([this.filters,s]);this.kernel=this.addWeight("kernel",i,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new Ht({ndim:4,axes:{[e]:s}})],this.built=!0}call(t,e){return i.tidy((()=>{let e=Wt(t);if(4!==e.shape.length)throw new a(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${e.shape.length}`);const s=e.shape,n=s[0];let r,o;"channelsFirst"===this.dataFormat?(r=2,o=3):(r=1,o=2);const l=s[r],u=s[o],h=this.kernelSize[0],c=this.kernelSize[1],p=this.strides[0],d=this.strides[1],g=[n,fi(l,p,h,this.padding),fi(u,d,c,this.padding),this.filters];"channelsLast"!==this.dataFormat&&(e=i.transpose(e,[0,2,3,1]));let f=i.conv2dTranspose(e,this.kernel.read(),g,this.strides,this.padding);return"channelsLast"!==this.dataFormat&&(f=i.transpose(f,[0,3,1,2])),null!=this.bias&&(f=dt(f,this.bias.read(),this.dataFormat)),null!=this.activation&&(f=this.activation.apply(f)),f}))}computeOutputShape(t){const e=(t=Ut(t)).slice();let s,i,n;"channelsFirst"===this.dataFormat?(s=1,i=2,n=3):(s=3,i=1,n=2);const r=this.kernelSize[0],a=this.kernelSize[1],o=this.strides[0],l=this.strides[1];return e[s]=this.filters,e[i]=fi(e[i],o,r,this.padding),e[n]=fi(e[n],l,a,this.padding),e}getConfig(){const t=super.getConfig();return delete t.dilationRate,t}}vi.className="Conv2DTranspose",e.serialization.registerClass(vi);class Ni extends Si{constructor(t){if(super(t),this.inputSpec=[new Ht({ndim:5})],"same"!==this.padding&&"valid"!==this.padding)throw new a(`Conv3DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(t){if(5!==(t=Ut(t)).length)throw new a("Input should have rank 5; Received input shape: "+JSON.stringify(t));const e="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[e])throw new a("The channel dimension of the inputs should be defined. 
Found `None`.");const s=t[e],i=this.kernelSize.concat([this.filters,s]);this.kernel=this.addWeight("kernel",i,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new Ht({ndim:5,axes:{[e]:s}})],this.built=!0}call(t,e){return i.tidy((()=>{let e=Wt(t);if(5!==e.shape.length)throw new a(`Conv3DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${e.shape.length}`);const s=e.shape,n=s[0];let r,o,l;"channelsFirst"===this.dataFormat?(l=2,r=3,o=4):(l=1,r=2,o=3);const u=s[l],h=s[r],c=s[o],p=this.kernelSize[0],d=this.kernelSize[1],g=this.kernelSize[2],f=this.strides[0],m=this.strides[1],y=this.strides[2],b=[n,fi(u,f,p,this.padding),fi(h,m,d,this.padding),fi(c,y,g,this.padding),this.filters];"channelsLast"!==this.dataFormat&&(e=i.transpose(e,[0,2,3,4,1]));let w=i.conv3dTranspose(e,this.kernel.read(),b,this.strides,this.padding);return"channelsLast"!==this.dataFormat&&(w=i.transpose(w,[0,4,1,2,3])),null!==this.bias&&(w=dt(w,this.bias.read(),this.dataFormat)),null!==this.activation&&(w=this.activation.apply(w)),w}))}computeOutputShape(t){const e=(t=Ut(t)).slice();let s,i,n,r;"channelsFirst"===this.dataFormat?(s=1,i=2,n=3,r=4):(s=4,i=1,n=2,r=3);const a=this.kernelSize[0],o=this.kernelSize[1],l=this.kernelSize[2],u=this.strides[0],h=this.strides[1],c=this.strides[2];return e[s]=this.filters,e[i]=fi(e[i],u,a,this.padding),e[n]=fi(e[n],h,o,this.padding),e[r]=fi(e[r],c,l,this.padding),e}getConfig(){const t=super.getConfig();return delete t.dilationRate,t}}Ni.className="Conv3DTranspose",e.serialization.registerClass(Ni);class Ci extends zi{constructor(t,e){if(super(t,e),this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,null==e.filters)throw new a("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(null!=e.kernelInitializer||null!=e.kernelRegularizer||null!=e.kernelConstraint)throw new a("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. 
Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(null!=e.padding&&"same"!==e.padding&&"valid"!==e.padding)throw new a(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(e.padding)}`);this.depthMultiplier=null==e.depthMultiplier?1:e.depthMultiplier,this.depthwiseInitializer=_t(e.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=ai(e.depthwiseRegularizer),this.depthwiseConstraint=be(e.depthwiseConstraint),this.pointwiseInitializer=_t(e.pointwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=ai(e.pointwiseRegularizer),this.pointwiseConstraint=be(e.pointwiseConstraint)}build(t){if((t=Ut(t)).length<this.rank+2)throw new a(`Inputs to SeparableConv${this.rank}D should have rank ${this.rank+2}, but received input shape: ${JSON.stringify(t)}`);const e="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[e]||t[e]<0)throw new a(`The channel dimension of the inputs should be defined, but found ${JSON.stringify(t[e])}`);const s=t[e],i=this.kernelSize.concat([s,this.depthMultiplier]),n=[];for(let t=0;t<this.rank;++t)n.push(1);n.push(s*this.depthMultiplier,this.filters);const r=!0;this.depthwiseKernel=this.addWeight("depthwise_kernel",i,"float32",this.depthwiseInitializer,this.depthwiseRegularizer,r,this.depthwiseConstraint),this.pointwiseKernel=this.addWeight("pointwise_kernel",n,"float32",this.pointwiseInitializer,this.pointwiseRegularizer,r,this.pointwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,r,this.biasConstraint):this.bias=null,this.inputSpec=[new Ht({ndim:this.rank+2,axes:{[e]:s}})],this.built=!0}call(t,s){return e.tidy((()=>{let e;if(t=Wt(t),1===this.rank)throw new o("1D separable convolution is not implemented yet.");return 2===this.rank&&("channelsFirst"===this.dataFormat&&(t=i.transpose(t,[0,2,3,1])),e=i.separableConv2d(t,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(e=dt(e,this.bias.read(),this.dataFormat)),null!=this.activation&&(e=this.activation.apply(e)),"channelsFirst"===this.dataFormat&&(e=i.transpose(e,[0,3,1,2])),e}))}getConfig(){const t=super.getConfig();return delete t.rank,delete t.kernelInitializer,delete t.kernelRegularizer,delete t.kernelConstraint,t.depthwiseInitializer=Rt(this.depthwiseInitializer),t.pointwiseInitializer=Rt(this.pointwiseInitializer),t.depthwiseRegularizer=ni(this.depthwiseRegularizer),t.pointwiseRegularizer=ni(this.pointwiseRegularizer),t.depthwiseConstraint=me(this.depthwiseConstraint),t.pointwiseConstraint=me(this.pointwiseConstraint),t}}Ci.className="SeparableConv";class Ai extends Ci{constructor(t){super(2,t)}}Ai.className="SeparableConv2D",e.serialization.registerClass(Ai);class xi extends zi{constructor(t){super(1,t),xi.verifyArgs(t),this.inputSpec=[{ndim:3}]}getConfig(){const t=super.getConfig();return delete t.rank,delete t.dataFormat,t}static verifyArgs(t){if("number"!=typeof t.kernelSize&&!C(t.kernelSize,"number",1,1))throw new a(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(t.kernelSize)}.`)}}xi.className="Conv1D",e.serialization.registerClass(xi);class Ii extends Xt{constructor(t){super(t),"number"==typeof t.cropping?this.cropping=[[t.cropping,t.cropping],[t.cropping,t.cropping]]:"number"==typeof 
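/* The SeparableConv class defined above factors a convolution into a
per-channel depthwise pass (kernel shape [kernelSize..., inChannels,
depthMultiplier]) followed by a 1x1 pointwise pass (kernel shape
[1, 1, inChannels * depthMultiplier, filters]). A minimal usage sketch through
the public layers API; the option values here are illustrative only:
  const layer = tf.layers.separableConv2d(
      {filters: 8, kernelSize: 3, inputShape: [28, 28, 1]});
*/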
t.cropping[0]?this.cropping=[[t.cropping[0],t.cropping[0]],[t.cropping[1],t.cropping[1]]]:this.cropping=t.cropping,this.dataFormat=void 0===t.dataFormat?"channelsLast":t.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(t){return"channelsFirst"===this.dataFormat?[t[0],t[1],t[2]-this.cropping[0][0]-this.cropping[0][1],t[3]-this.cropping[1][0]-this.cropping[1][1]]:[t[0],t[1]-this.cropping[0][0]-this.cropping[0][1],t[2]-this.cropping[1][0]-this.cropping[1][1],t[3]]}call(t,s){return e.tidy((()=>{if(t=Wt(t),"channelsLast"===this.dataFormat){const e=nt(t,this.cropping[0][0],t.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return nt(e,this.cropping[1][0],t.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}{const e=nt(t,this.cropping[0][0],t.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return nt(e,this.cropping[1][0],t.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}}))}getConfig(){const t={cropping:this.cropping,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}Ii.className="Cropping2D",e.serialization.registerClass(Ii);class Li extends Xt{constructor(t){var e;super(t),this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=null==t.size?this.DEFAULT_SIZE:t.size,this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,B(this.dataFormat),this.interpolation=null==t.interpolation?"nearest":t.interpolation,e=this.interpolation,N(F,"InterpolationFormat",e)}computeOutputShape(t){if("channelsFirst"===this.dataFormat){const e=null==t[2]?null:this.size[0]*t[2],s=null==t[3]?null:this.size[1]*t[3];return[t[0],t[1],e,s]}{const e=null==t[1]?null:this.size[0]*t[1],s=null==t[2]?null:this.size[1]*t[2];return[t[0],e,s,t[3]]}}call(t,e){return i.tidy((()=>{let e=Wt(t);const s=e.shape;if("channelsFirst"===this.dataFormat){e=i.transpose(e,[0,2,3,1]);const t=this.size[0]*s[2],n=this.size[1]*s[3],r="nearest"===this.interpolation?i.image.resizeNearestNeighbor(e,[t,n]):i.image.resizeBilinear(e,[t,n]);return i.transpose(r,[0,3,1,2])}{const t=this.size[0]*s[1],n=this.size[1]*s[2];return"nearest"===this.interpolation?i.image.resizeNearestNeighbor(e,[t,n]):i.image.resizeBilinear(e,[t,n])}}))}getConfig(){const t={size:this.size,dataFormat:this.dataFormat,interpolation:this.interpolation},e=super.getConfig();return Object.assign(t,e),t}}Li.className="UpSampling2D",e.serialization.registerClass(Li);class Ti extends wi{constructor(t){super(2,t),this.depthwiseKernel=null,this.depthMultiplier=null==t.depthMultiplier?1:t.depthMultiplier,this.depthwiseInitializer=_t(t.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=be(t.depthwiseConstraint),this.depthwiseRegularizer=ai(t.depthwiseRegularizer)}build(t){if((t=Ut(t)).length<4)throw new a(`Inputs to DepthwiseConv2D should have rank 4. 
Received input shape: ${JSON.stringify(t)}.`);const e="channelsFirst"===this.dataFormat?1:3;if(null==t[e]||t[e]<0)throw new a(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${t[e]}).`);const s=t[e],i=[this.kernelSize[0],this.kernelSize[1],s,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",i,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[s*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,s){return e.tidy((()=>{let s=function(t,s,n=[1,1],r="valid",o,l){return e.tidy((()=>{null==o&&(o="channelsLast"),B(o);let e=mi(t,o);if(4!==t.rank)throw new a(`Input for depthwiseConv2d is required to be 4-D, but is instead ${t.rank}-D`);if(4!==s.rank)throw new a(`depthwiseKernel is required to be 4-D, but is instead ${s.rank}-D`);return e=i.depthwiseConv2d(e,s,n,"same"===r?"same":"valid","NHWC",l),"channelsFirst"===o&&(e=i.transpose(e,[0,3,1,2])),e}))}(t=Wt(t),this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(s=dt(s,this.bias.read(),this.dataFormat)),null!=this.activation&&(s=this.activation.apply(s)),s}))}computeOutputShape(t){t=Ut(t);const e="channelsFirst"===this.dataFormat?t[2]:t[1],s="channelsFirst"===this.dataFormat?t[3]:t[2],i="channelsFirst"===this.dataFormat?t[1]*this.depthMultiplier:t[3]*this.depthMultiplier,n=gi(e,this.kernelSize[0],this.padding,this.strides[0]),r=gi(s,this.kernelSize[1],this.padding,this.strides[1]);return"channelsFirst"===this.dataFormat?[t[0],i,n,r]:[t[0],n,r,i]}getConfig(){const t=super.getConfig();return t.depthMultiplier=this.depthMultiplier,t.depthwiseInitializer=Rt(this.depthwiseInitializer),t.depthwiseRegularizer=ni(this.depthwiseRegularizer),t.depthwiseConstraint=me(this.depthwiseConstraint),t}}function Ei(t,e,s,i){if(Array.isArray(t)){if(null!=e||null!=s)throw new a("When inputs is an array, neither initialState nor constants should be provided");null!=i&&(s=t.slice(t.length-i,t.length),t=t.slice(0,t.length-i)),t.length>1&&(e=t.slice(1,t.length)),t=t[0]}function n(t){return null==t||Array.isArray(t)?t:[t]}return{inputs:t,initialState:e=n(e),constants:s=n(s)}}function Di(t,e,s,n=!1,r,l,u=!1,h=!1){return i.tidy((()=>{const c=e.shape.length;if(c<3)throw new a(`Input should be at least 3D, but is ${c}D.`);const p=[1,0].concat(Y(2,c));if(e=i.transpose(e,p),null!=l)throw new o("The rnn() function of the deeplearn.js backend does not support constants yet.");u&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),null!=r&&((r=i.cast(i.cast(r,"bool"),"float32")).rank===c-1&&(r=i.expandDims(r,-1)),r=i.transpose(r,p)),n&&(e=i.reverse(e,0),null!=r&&(r=i.reverse(r,0)));const d=[];let g,f=s;const m=e.shape[0],y=i.unstack(e);let b,w;null!=r&&(b=i.unstack(r));for(let e=0;e<m;++e){const s=y[e],n=i.tidy((()=>t(s,f)));if(null==r)g=n[0],f=n[1];else{const t=i.tidy((()=>{const t=b[e],s=i.sub(i.onesLike(t),t);return{output:i.add(i.mul(n[0],t),i.mul(f[0],s)),newStates:f.map(((e,r)=>i.add(i.mul(n[1][r],t),i.mul(e,s))))}}));g=t.output,f=t.newStates}h&&d.push(g)}if(h){const t=1;w=i.stack(d,t)}return[g,w,f]}))}Ti.className="DepthwiseConv2D",e.serialization.registerClass(Ti);class $i extends Xt{constructor(t){let e;if(super(t),null==t.cell)throw new a("cell property is missing for the constructor of RNN.");if(e=Array.isArray(t.cell)?new 
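/* The backend helper minified as `Di` above is the shared per-timestep scan
used by every RNN layer that follows: it transposes inputs to time-major
order, unstacks the time dimension, applies the cell's step function once per
step, and, when a mask is supplied, blends each step as
output = mask * newOutput + (1 - mask) * previousState, so masked timesteps
carry the previous state through unchanged. */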
Ui({cells:t.cell}):t.cell,null==e.stateSize)throw new a("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN state).");this.cell=e,this.returnSequences=null!=t.returnSequences&&t.returnSequences,this.returnState=null!=t.returnState&&t.returnState,this.goBackwards=null!=t.goBackwards&&t.goBackwards,this._stateful=null!=t.stateful&&t.stateful,this.unroll=null!=t.unroll&&t.unroll,this.supportsMasking=!0,this.inputSpec=[new Ht({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){if(null==this.states_){return Y(0,Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1).map((t=>null))}return this.states_}setStates(t){this.states_=t}computeOutputShape(t){Mt(t)&&(t=t[0]);let e=this.cell.stateSize;Array.isArray(e)||(e=[e]);const s=e[0];let i;if(i=this.returnSequences?[t[0],t[1],s]:[t[0],s],this.returnState){const s=[];for(const i of e)s.push([t[0],i]);return[i].concat(s)}return i}computeMask(t,e){return i.tidy((()=>{Array.isArray(e)&&(e=e[0]);const t=this.returnSequences?e:null;if(this.returnState){const e=this.states.map((t=>null));return[t].concat(e)}return t}))}get states(){if(null==this.states_){const t=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,e=[];for(let s=0;s<t;++s)e.push(null);return e}return this.states_}set states(t){this.states_=t}build(t){if(null!=this.numConstants)throw new o("Constants support is not implemented in RNN yet.");Mt(t)&&(t=t[0]);const s=this.stateful?t[0]:null,i=t.slice(2);this.inputSpec[0]=new Ht({shape:[s,null,...i]});const n=[t[0]].concat(t.slice(2));let r;if(this.cell.build(n),r=Array.isArray(this.cell.stateSize)?this.cell.stateSize:[this.cell.stateSize],null!=this.stateSpec){if(!e.util.arraysEqual(this.stateSpec.map((t=>t.shape[t.shape.length-1])),r))throw new a(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=r.map((t=>new Ht({shape:[null,t]})));this.stateful&&this.resetStates()}resetStates(t,s=!1){e.tidy((()=>{if(!this.stateful)throw new n("Cannot call resetStates() on an RNN Layer that is not stateful.");const r=this.inputSpec[0].shape[0];if(null==r)throw new a("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(null==this.states_)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map((t=>i.zeros([r,t]))):this.states_=[i.zeros([r,this.cell.stateSize])];else if(null==t)i.dispose(this.states_),null!=this.keptStates&&(i.dispose(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map((t=>i.zeros([r,t]))):this.states_[0]=i.zeros([r,this.cell.stateSize]);else{if(Array.isArray(t)||(t=[t]),t.length!==this.states_.length)throw new a(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${t.length} state value(s). 
Input received: ${t}`);!0===s?this.keptStates.push(this.states_.slice()):i.dispose(this.states_);for(let s=0;s<this.states_.length;++s){const i=t[s],n=Array.isArray(this.cell.stateSize)?this.cell.stateSize[s]:this.cell.stateSize,o=[r,n];if(!e.util.arraysEqual(i.shape,o))throw new a(`State ${s} is incompatible with layer ${this.name}: expected shape=${o}, received shape=${i.shape}`);this.states_[s]=i}}this.states_=this.states_.map((t=>i.keep(t.clone())))}))}apply(t,e){let s=null==e?null:e.initialState,i=null==e?null:e.constants;null==e&&(e={});const n=Ei(t,s,i,this.numConstants);t=n.inputs,s=n.initialState,i=n.constants;let r=[],a=[];if(null!=s){e.initialState=s,r=r.concat(s),this.stateSpec=[];for(const t of s)this.stateSpec.push(new Ht({shape:t.shape}));a=a.concat(this.stateSpec)}null!=i&&(e.constants=i,r=r.concat(i),this.numConstants=i.length);if(r[0]instanceof Zt){const s=[t].concat(r),i=this.inputSpec.concat(a),n=this.inputSpec;this.inputSpec=i;const o=super.apply(s,e);return this.inputSpec=n,o}return super.apply(t,e)}call(t,s){return e.tidy((()=>{const e=null==s?null:s.mask,i=null==s?null:s.training;let n=null==s?null:s.initialState;t=Wt(t),null==n&&(n=this.stateful?this.states_:this.getInitialState(t));const r=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(n.length!==r)throw new a(`RNN Layer has ${r} state(s) but was passed ${n.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");const o={training:i},l=Di(((t,e)=>{const s=this.cell.call([t].concat(e),o);return[s[0],s.slice(1)]}),t,n,this.goBackwards,e,null,this.unroll,this.returnSequences),u=l[0],h=l[1],c=l[2];this.stateful&&this.resetStates(c,i);const p=this.returnSequences?h:u;return this.returnState?[p].concat(c):p}))}getInitialState(t){return e.tidy((()=>{let e=i.zeros(t.shape);return e=i.sum(e,[1,2]),e=et(e),Array.isArray(this.cell.stateSize)?this.cell.stateSize.map((t=>t>1?ot(e,[1,t]):e)):this.cell.stateSize>1?[ot(e,[1,this.cell.stateSize])]:[e]}))}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),null!=this.cell&&this.cell.setFastWeightInitDuringBuild(t)}getConfig(){const t=super.getConfig(),e={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};null!=this.numConstants&&(e.numConstants=this.numConstants);const s=this.cell.getConfig();return this.getClassName()===$i.className&&(e.cell={className:this.cell.getClassName(),config:s}),Object.assign({},s,t,e)}static fromConfig(t,e,s={}){const i=De(e.cell,s);return new t(Object.assign(e,{cell:i}))}}$i.className="RNN",e.serialization.registerClass($i);class Fi extends Xt{}class Oi extends 
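/* SimpleRNNCell implements the recurrence
h_t = activation(x_t . W + h_{t-1} . U + b), with optional independent dropout
masks applied to x_t and h_{t-1}; its call() returns [output, state], both
equal to h_t. */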
Fi{constructor(t){super(t),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=t.units,A(this.units,"units"),this.activation=Qs(null==t.activation?this.DEFAULT_ACTIVATION:t.activation),this.useBias=null==t.useBias||t.useBias,this.kernelInitializer=_t(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=_t(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=_t(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=ai(t.kernelRegularizer),this.recurrentRegularizer=ai(t.recurrentRegularizer),this.biasRegularizer=ai(t.biasRegularizer),this.kernelConstraint=be(t.kernelConstraint),this.recurrentConstraint=be(t.recurrentConstraint),this.biasConstraint=be(t.biasConstraint),this.dropout=K([1,G([0,null==t.dropout?0:t.dropout])]),this.recurrentDropout=K([1,G([0,null==t.recurrentDropout?0:t.recurrentDropout])]),this.dropoutFunc=t.dropoutFunc,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){t=Ut(t),this.kernel=this.addWeight("kernel",[t[t.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,s){return e.tidy((()=>{if(2!==t.length)throw new a(`SimpleRNNCell expects 2 input Tensors, got ${t.length}.`);let e=t[1];t=t[0];const n=null!=s.training&&s.training;let r;0<this.dropout&&this.dropout<1&&null==this.dropoutMask&&(this.dropoutMask=Pi({ones:()=>i.onesLike(t),rate:this.dropout,training:n,dropoutFunc:this.dropoutFunc})),0<this.recurrentDropout&&this.recurrentDropout<1&&null==this.recurrentDropoutMask&&(this.recurrentDropoutMask=Pi({ones:()=>i.onesLike(e),rate:this.recurrentDropout,training:n,dropoutFunc:this.dropoutFunc}));const o=this.dropoutMask,l=this.recurrentDropoutMask;r=ut(null!=o?i.mul(t,o):t,this.kernel.read()),null!=this.bias&&(r=dt(r,this.bias.read())),null!=l&&(e=i.mul(e,l));let u=i.add(r,ut(e,this.recurrentKernel.read()));return null!=this.activation&&(u=this.activation.apply(u)),[u,u]}))}getConfig(){const t=super.getConfig(),e={units:this.units,activation:Ys(this.activation),useBias:this.useBias,kernelInitializer:Rt(this.kernelInitializer),recurrentInitializer:Rt(this.recurrentInitializer),biasInitializer:Rt(this.biasInitializer),kernelRegularizer:ni(this.kernelRegularizer),recurrentRegularizer:ni(this.recurrentRegularizer),biasRegularizer:ni(this.biasRegularizer),activityRegularizer:ni(this.activityRegularizer),kernelConstraint:me(this.kernelConstraint),recurrentConstraint:me(this.recurrentConstraint),biasConstraint:me(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign({},t,e)}}Oi.className="SimpleRNNCell",e.serialization.registerClass(Oi);class Ri extends $i{constructor(t){t.cell=new Oi(t),super(t)}call(t,s){return e.tidy((()=>{null!=this.cell.dropoutMask&&(i.dispose(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(i.dispose(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const e=null==s?null:s.mask,n=null==s?null:s.training,r=null==s?null:s.initialState;return 
super.call(t,{mask:e,training:n,initialState:r})}))}static fromConfig(t,e){return new t(e)}}Ri.className="SimpleRNN",e.serialization.registerClass(Ri);class _i extends Fi{constructor(t){if(super(t),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",t.resetAfter)throw new a("GRUCell does not support reset_after parameter set to true.");this.units=t.units,A(this.units,"units"),this.activation=Qs(void 0===t.activation?this.DEFAULT_ACTIVATION:t.activation),this.recurrentActivation=Qs(void 0===t.recurrentActivation?this.DEFAULT_RECURRENT_ACTIVATION:t.recurrentActivation),this.useBias=null==t.useBias||t.useBias,this.kernelInitializer=_t(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=_t(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=_t(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=ai(t.kernelRegularizer),this.recurrentRegularizer=ai(t.recurrentRegularizer),this.biasRegularizer=ai(t.biasRegularizer),this.kernelConstraint=be(t.kernelConstraint),this.recurrentConstraint=be(t.recurrentConstraint),this.biasConstraint=be(t.biasConstraint),this.dropout=K([1,G([0,null==t.dropout?0:t.dropout])]),this.recurrentDropout=K([1,G([0,null==t.recurrentDropout?0:t.recurrentDropout])]),this.dropoutFunc=t.dropoutFunc,this.implementation=t.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){const e=(t=Ut(t))[t.length-1];this.kernel=this.addWeight("kernel",[e,3*this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,3*this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[3*this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,s){return e.tidy((()=>{if(2!==t.length)throw new a(`GRUCell expects 2 input Tensors (inputs, h), got ${t.length}.`);const e=null!=s.training&&s.training;let n=t[1];t=t[0],0<this.dropout&&this.dropout<1&&null==this.dropoutMask&&(this.dropoutMask=Pi({ones:()=>i.onesLike(t),rate:this.dropout,training:e,count:3,dropoutFunc:this.dropoutFunc})),0<this.recurrentDropout&&this.recurrentDropout<1&&null==this.recurrentDropoutMask&&(this.recurrentDropoutMask=Pi({ones:()=>i.onesLike(n),rate:this.recurrentDropout,training:e,count:3,dropoutFunc:this.dropoutFunc}));const r=this.dropoutMask,o=this.recurrentDropoutMask;let l,u,h;0<this.dropout&&this.dropout<1&&(t=i.mul(t,r[0]));let c=ut(t,this.kernel.read());this.useBias&&(c=dt(c,this.bias.read())),0<this.recurrentDropout&&this.recurrentDropout<1&&(n=i.mul(n,o[0]));const p=this.recurrentKernel.read(),[d,g]=i.split(p,[2*this.units,this.units],p.rank-1),f=ut(n,d),[m,y,b]=i.split(c,3,c.rank-1),[w,z]=i.split(f,2,f.rank-1);l=this.recurrentActivation.apply(i.add(m,w)),u=this.recurrentActivation.apply(i.add(y,z));const k=ut(i.mul(u,n),g);h=this.activation.apply(i.add(b,k));const S=i.add(i.mul(l,n),i.mul(i.add(1,i.neg(l)),h));return[S,S]}))}getConfig(){const 
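/* The GRUCell.call() above computes, in order: update gate
z = recurrentActivation(x.Wz + h.Uz), reset gate
r = recurrentActivation(x.Wr + h.Ur), candidate
hh = activation(x.Wh + (r * h).Uh), and new state h' = z * h + (1 - z) * hh;
the fused [inputDim, 3 * units] kernel is sliced into the z, r and h blocks
with tf.split. */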
t=super.getConfig(),e={units:this.units,activation:Ys(this.activation),recurrentActivation:Ys(this.recurrentActivation),useBias:this.useBias,kernelInitializer:Rt(this.kernelInitializer),recurrentInitializer:Rt(this.recurrentInitializer),biasInitializer:Rt(this.biasInitializer),kernelRegularizer:ni(this.kernelRegularizer),recurrentRegularizer:ni(this.recurrentRegularizer),biasRegularizer:ni(this.biasRegularizer),activityRegularizer:ni(this.activityRegularizer),kernelConstraint:me(this.kernelConstraint),recurrentConstraint:me(this.recurrentConstraint),biasConstraint:me(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout,implementation:this.implementation,resetAfter:!1};return Object.assign({},t,e)}}_i.className="GRUCell",e.serialization.registerClass(_i);class Mi extends $i{constructor(t){0===t.implementation&&console.warn("`implementation=0` has been deprecated, and now defaults to `implementation=1`. Please update your layer call."),t.cell=new _i(t),super(t)}call(t,s){return e.tidy((()=>{null!=this.cell.dropoutMask&&(i.dispose(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(i.dispose(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const e=null==s?null:s.mask,n=null==s?null:s.training,r=null==s?null:s.initialState;return super.call(t,{mask:e,training:n,initialState:r})}))}static fromConfig(t,e){return 0===e.implementation&&(e.implementation=1),new t(e)}}Mi.className="GRU",e.serialization.registerClass(Mi);class Bi extends Fi{constructor(t){super(t),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=t.units,A(this.units,"units"),this.activation=Qs(void 0===t.activation?this.DEFAULT_ACTIVATION:t.activation),this.recurrentActivation=Qs(void 0===t.recurrentActivation?this.DEFAULT_RECURRENT_ACTIVATION:t.recurrentActivation),this.useBias=null==t.useBias||t.useBias,this.kernelInitializer=_t(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=_t(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=_t(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=t.unitForgetBias,this.kernelRegularizer=ai(t.kernelRegularizer),this.recurrentRegularizer=ai(t.recurrentRegularizer),this.biasRegularizer=ai(t.biasRegularizer),this.kernelConstraint=be(t.kernelConstraint),this.recurrentConstraint=be(t.recurrentConstraint),this.biasConstraint=be(t.biasConstraint),this.dropout=K([1,G([0,null==t.dropout?0:t.dropout])]),this.recurrentDropout=K([1,G([0,null==t.recurrentDropout?0:t.recurrentDropout])]),this.dropoutFunc=t.dropoutFunc,this.implementation=t.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){var e;const s=(t=Ut(t))[t.length-1];let i;if(this.kernel=this.addWeight("kernel",[s,4*this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,4*this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){if(this.unitForgetBias){const t=this.biasInitializer,s=this.units;i=new((e=class extends bt{apply(e,i){const n=t.apply([s]),r=(new zt).apply([s]),a=t.apply([2*s]);return at(at(n,r),a)}}).className="CustomInit",e)}else 
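/* With unitForgetBias set, the custom initializer above assembles the fused
bias of length 4 * units as [biasInit(units), ones(units), biasInit(2 * units)],
so the forget-gate block starts at one and gradients flow through the cell
state early in training; otherwise the branch below falls back to the plain
bias initializer. */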
i=this.biasInitializer;this.bias=this.addWeight("bias",[4*this.units],null,i,this.biasRegularizer,!0,this.biasConstraint)}else this.bias=null;this.built=!0}call(t,s){return e.tidy((()=>{const e=null!=s.training&&s.training;if(3!==t.length)throw new a(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${t.length}.`);let n=t[1];const r=t[2];t=t[0],0<this.dropout&&this.dropout<1&&null==this.dropoutMask&&(this.dropoutMask=Pi({ones:()=>i.onesLike(t),rate:this.dropout,training:e,count:4,dropoutFunc:this.dropoutFunc})),0<this.recurrentDropout&&this.recurrentDropout<1&&null==this.recurrentDropoutMask&&(this.recurrentDropoutMask=Pi({ones:()=>i.onesLike(n),rate:this.recurrentDropout,training:e,count:4,dropoutFunc:this.dropoutFunc}));const o=this.dropoutMask,l=this.recurrentDropoutMask;let u,h,c,p;0<this.dropout&&this.dropout<1&&(t=i.mul(t,o[0]));let d=ut(t,this.kernel.read());0<this.recurrentDropout&&this.recurrentDropout<1&&(n=i.mul(n,l[0])),d=i.add(d,ut(n,this.recurrentKernel.read())),this.useBias&&(d=dt(d,this.bias.read()));const[g,f,m,y]=i.split(d,4,d.rank-1);u=this.recurrentActivation.apply(g),h=this.recurrentActivation.apply(f),c=i.add(i.mul(h,r),i.mul(u,this.activation.apply(m))),p=this.recurrentActivation.apply(y);const b=i.mul(p,this.activation.apply(c));return[b,b,c]}))}getConfig(){const t=super.getConfig(),e={units:this.units,activation:Ys(this.activation),recurrentActivation:Ys(this.recurrentActivation),useBias:this.useBias,kernelInitializer:Rt(this.kernelInitializer),recurrentInitializer:Rt(this.recurrentInitializer),biasInitializer:Rt(this.biasInitializer),unitForgetBias:this.unitForgetBias,kernelRegularizer:ni(this.kernelRegularizer),recurrentRegularizer:ni(this.recurrentRegularizer),biasRegularizer:ni(this.biasRegularizer),activityRegularizer:ni(this.activityRegularizer),kernelConstraint:me(this.kernelConstraint),recurrentConstraint:me(this.recurrentConstraint),biasConstraint:me(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout,implementation:this.implementation};return Object.assign({},t,e)}}Bi.className="LSTMCell",e.serialization.registerClass(Bi);class Wi extends $i{constructor(t){0===t.implementation&&console.warn("`implementation=0` has been deprecated, and now defaults to `implementation=1`. 
Please update your layer call."),t.cell=new Bi(t),super(t)}call(t,s){return e.tidy((()=>{null!=this.cell.dropoutMask&&(i.dispose(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(i.dispose(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const e=null==s?null:s.mask,n=null==s?null:s.training,r=null==s?null:s.initialState;return super.call(t,{mask:e,training:n,initialState:r})}))}static fromConfig(t,e){return 0===e.implementation&&(e.implementation=1),new t(e)}}Wi.className="LSTM",e.serialization.registerClass(Wi);class Ui extends Fi{constructor(t){super(t),this.cells=t.cells}get stateSize(){const t=[];for(const e of this.cells.slice().reverse())Array.isArray(e.stateSize)?t.push(...e.stateSize):t.push(e.stateSize);return t}call(t,s){return e.tidy((()=>{let e=t.slice(1);const i=[];for(const t of this.cells.slice().reverse())Array.isArray(t.stateSize)?i.push(e.splice(0,t.stateSize.length)):i.push(e.splice(0,1));i.reverse();const n=[];let r;for(let a=0;a<this.cells.length;++a){const o=this.cells[a];e=i[a],r=0===a?[t[0]].concat(e):[r[0]].concat(e),r=o.call(r,s),n.push(r.slice(1))}e=[];for(const t of n.slice().reverse())e.push(...t);return[r[0]].concat(e)}))}build(t){let e;Mt(t)&&(t=t[0]),this.cells.forEach(((s,i)=>{j(`RNNCell_${i}`,(()=>{s.build(t),e=Array.isArray(s.stateSize)?s.stateSize[0]:s.stateSize,t=[t[0],e]}))})),this.built=!0}getConfig(){const t=super.getConfig(),e={cells:this.cells.map((t=>({className:t.getClassName(),config:t.getConfig()})))};return Object.assign({},t,e)}static fromConfig(t,e,s={}){const i=[];for(const t of e.cells)i.push(De(t,s));return new t({cells:i})}get trainableWeights(){if(!this.trainable)return[];const t=[];for(const e of this.cells)t.push(...e.trainableWeights);return t}get nonTrainableWeights(){const t=[];for(const e of this.cells)t.push(...e.nonTrainableWeights);if(!this.trainable){const e=[];for(const t of this.cells)e.push(...t.trainableWeights);return e.concat(t)}return t}getWeights(){const t=[];for(const e of this.cells)t.push(...e.weights);return qt(t)}setWeights(t){const e=[];for(const s of this.cells){const i=s.weights.length,n=t.splice(i);for(let t=0;t<s.weights.length;++t)e.push([s.weights[t],n[t]])}Jt(e)}}function Pi(t){const{ones:e,rate:s,training:n=!1,count:r=1,dropoutFunc:a}=t,o=()=>null!=a?a(e(),s):gt(e(),s),l=()=>ft(o,e,n);if(!r||r<=1)return i.keep(l().clone());return Array(r).fill(void 0).map(l).map((t=>i.keep(t.clone())))}Ui.className="StackedRNNCells",e.serialization.registerClass(Ui);var ji=function(t,e){var s={};for(var i in t)Object.prototype.hasOwnProperty.call(t,i)&&e.indexOf(i)<0&&(s[i]=t[i]);if(null!=t&&"function"==typeof Object.getOwnPropertySymbols){var n=0;for(i=Object.getOwnPropertySymbols(t);n<i.length;n++)e.indexOf(i[n])<0&&Object.prototype.propertyIsEnumerable.call(t,i[n])&&(s[i[n]]=t[i[n]])}return s};class Vi extends $i{constructor(t){if(t.unroll)throw new o("Unrolling is not possible with convolutional RNNs.");if(Array.isArray(t.cell))throw new o("It is not possible at the moment to stack convolutional cells.");super(t),this.inputSpec=[new Ht({ndim:5})]}call(t,e){return i.tidy((()=>{if(null!=this.cell.dropoutMask&&(i.dispose(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(i.dispose(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),e&&e.constants)throw new a("ConvRNN2D cell does not support constants");const s=null==e?null:e.mask,n=null==e?null:e.training,r=null==e?null:e.initialState;return 
super.call(t,{mask:s,training:n,initialState:r})}))}computeOutputShape(t){let e=this.computeSingleOutputShape(t);return this.returnSequences||(e=[e[0],...e.slice(2)]),this.returnState&&(e=[e,...Array(2).fill([t[0],...e.slice(-3)])]),e}getInitialState(t){return i.tidy((()=>{const{stateSize:e}=this.cell,s=t.shape,n=this.computeSingleOutputShape(s),r=[n[0],...n.slice(2)],a=i.zeros(r);return Array.isArray(e)?Array(e.length).fill(a):[a]}))}resetStates(t,s=!1){i.tidy((()=>{if(!this.stateful)throw new n("Cannot call resetStates() on an RNN Layer that is not stateful.");const r=this.inputSpec[0].shape,o=this.computeSingleOutputShape(r),l=[o[0],...o.slice(2)];if(null==r[0])throw new a("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(null==this.getStates())Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map((()=>i.zeros(l))):this.states_=[i.zeros(l)];else if(null==t)i.dispose(this.states_),null!=this.keptStates&&(i.dispose(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map((()=>i.zeros(l))):this.states_[0]=i.zeros(l);else{if(Array.isArray(t)||(t=[t]),t.length!==this.states_.length)throw new a(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${t.length} state value(s). Input received: ${t}`);s?this.keptStates.push(this.states_.slice()):i.dispose(this.states_);for(let s=0;s<this.states_.length;++s){const i=t[s],n=l;if(!e.util.arraysEqual(i.shape,n))throw new a(`State ${s} is incompatible with layer ${this.name}: expected shape=${n}, received shape=${i.shape}`);this.states_[s]=i}}this.states_=this.states_.map((t=>i.keep(t.clone())))}))}computeSingleOutputShape(t){const{dataFormat:e,filters:s,kernelSize:i,padding:n,strides:r,dilationRate:a}=this.cell,o="channelsFirst"===e,l=t[o?3:2],u=t[o?4:3],h=gi(l,i[0],n,r[0],a[0]),c=gi(u,i[1],n,r[1],a[1]);return[...t.slice(0,2),...o?[s,h,c]:[h,c,s]]}}Vi.className="ConvRNN2D";class qi extends Bi{constructor(t){const{filters:e,kernelSize:s,strides:i,padding:n,dataFormat:r,dilationRate:a}=t;super(Object.assign({},t,{units:e})),this.filters=e,A(this.filters,"filters"),this.kernelSize=di(s,2,"kernelSize"),this.kernelSize.forEach((t=>A(t,"kernelSize"))),this.strides=di(i||1,2,"strides"),this.strides.forEach((t=>A(t,"strides"))),this.padding=n||"valid",W(this.padding),this.dataFormat=r||"channelsLast",B(this.dataFormat),this.dilationRate=di(a||1,2,"dilationRate"),this.dilationRate.forEach((t=>A(t,"dilationRate")))}build(t){var e;t=Ut(t);const s="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[s])throw new a(`The channel dimension of the input should be defined. 
Found ${t[s]}`);const n=t[s],r=this.kernelSize.concat([n,4*this.filters]);this.kernel=this.addWeight("kernel",r,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);const o=this.kernelSize.concat([this.filters,4*this.filters]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",o,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let t;if(this.unitForgetBias){const s=this.biasInitializer,n=this.filters;t=new((e=class extends bt{apply(t,e){return rt([s.apply([n]),i.ones([n]),s.apply([2*n])])}}).className="CustomInit",e)}else t=this.biasInitializer;this.bias=this.addWeight("bias",[4*this.filters],null,t,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(t,e){return i.tidy((()=>{if(3!==t.length)throw new a(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${t.length}.`);const s=e.training||!1,n=t[0],r=t[1],o=t[2];0<this.dropout&&this.dropout<1&&null==this.dropoutMask&&(this.dropoutMask=Pi({ones:()=>i.onesLike(n),rate:this.dropout,training:s,count:4,dropoutFunc:this.dropoutFunc}));const l=this.dropoutMask,u=(t,e,s)=>e&&e[s]?i.mul(e[s],t):t;let h=u(n,l,0),c=u(n,l,1),p=u(n,l,2),d=u(n,l,3);0<this.recurrentDropout&&this.recurrentDropout<1&&null==this.recurrentDropoutMask&&(this.recurrentDropoutMask=Pi({ones:()=>i.onesLike(r),rate:this.recurrentDropout,training:s,count:4,dropoutFunc:this.dropoutFunc}));const g=this.recurrentDropoutMask;let f=u(r,g,0),m=u(r,g,1),y=u(r,g,2),b=u(r,g,3);const[w,z,k,S]=i.split(this.kernel.read(),4,3),[v,N,C,A]=this.useBias?i.split(this.bias.read(),4):[null,null,null,null];h=this.inputConv(h,w,v,this.padding),c=this.inputConv(c,z,N,this.padding),p=this.inputConv(p,k,C,this.padding),d=this.inputConv(d,S,A,this.padding);const[x,I,L,T]=i.split(this.recurrentKernel.read(),4,3);f=this.recurrentConv(f,x),m=this.recurrentConv(m,I),y=this.recurrentConv(y,L),b=this.recurrentConv(b,T);const E=this.recurrentActivation.apply(i.add(h,f)),D=this.recurrentActivation.apply(i.add(c,m)),$=i.add(i.mul(D,o),i.mul(E,this.activation.apply(i.add(p,y)))),F=i.mul(this.recurrentActivation.apply(i.add(d,b)),this.activation.apply($));return[F,F,$]}))}getConfig(){const t=super.getConfig(),e=ji(t,["units"]),s={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign({},e,s)}inputConv(t,e,s,n){const r=i.conv2d(t,e,this.strides,n||"valid","channelsFirst"===this.dataFormat?"NCHW":"NHWC",this.dilationRate);return s?dt(r,s,this.dataFormat):r}recurrentConv(t,e){return i.conv2d(t,e,1,"same","channelsFirst"===this.dataFormat?"NCHW":"NHWC")}}qi.className="ConvLSTM2DCell",i.serialization.registerClass(qi);class Ji extends Vi{constructor(t){const e=new qi(t);super(Object.assign({},t,{cell:e}))}static fromConfig(t,e){return new t(e)}}Ji.className="ConvLSTM2D",i.serialization.registerClass(Ji);class Hi extends Xt{constructor(t){super(t),this.rate=Math.max(Math.min(t.rate,1),0),this.noiseShape=t.noiseShape,this.seed=t.seed,this.supportsMasking=!0}getNoiseShape(t){if(null==this.noiseShape)return this.noiseShape;const e=t.shape,s=[];for(let t=0;t<this.noiseShape.length;++t)s.push(null==this.noiseShape[t]?e[t]:this.noiseShape[t]);return s}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s);const e=Wt(t);if(0<this.rate&&this.rate<1){const t=null!=s.training&&s.training,i=this.getNoiseShape(e);return ft((()=>gt(e,this.rate,i,this.seed)),(()=>e),t)}return t}))}getConfig(){const 
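/* Dropout's noiseShape lets the binary keep-mask broadcast: any null entry
falls back to the corresponding input dimension. SpatialDropout1D below relies
on this with a [batchSize, 1, features] mask, so a dropped feature is zeroed
across all timesteps rather than independently per element. */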
t={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed},e=super.getConfig();return Object.assign(t,e),t}dispose(){return super.dispose()}}Hi.className="Dropout",e.serialization.registerClass(Hi);class Zi extends Hi{constructor(t){super(t),this.inputSpec=[{ndim:3}]}getNoiseShape(t){const e=t.shape;return[e[0],1,e[2]]}}Zi.className="SpatialDropout1D",e.serialization.registerClass(Zi);class Ki extends Xt{constructor(t){if(super(t),this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",null==t.batchInputShape&&null==t.inputShape&&null!=t.inputDim){let e=null;null!=t.batchSize&&(e=t.batchSize),this.batchInputShape=[e,t.inputDim]}this.units=t.units,A(this.units,"units"),this.activation=Qs(t.activation),null!=t.useBias&&(this.useBias=t.useBias),this.kernelInitializer=_t(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=_t(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=be(t.kernelConstraint),this.biasConstraint=be(t.biasConstraint),this.kernelRegularizer=ai(t.kernelRegularizer),this.biasRegularizer=ai(t.biasRegularizer),this.activityRegularizer=ai(t.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(t){const e=(t=Ut(t))[t.length-1];null==this.kernel&&(this.kernel=this.addWeight("kernel",[e,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:e}}],this.built=!0}computeOutputShape(t){const e=(t=Ut(t)).slice();return e[e.length-1]=this.units,e}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s);const e=Wt(t),i=I(this.activation.getClassName());let n;return null!=i?n=ut(e,this.kernel.read(),i,this.bias?this.bias.read():null):(n=ut(e,this.kernel.read()),null!=this.bias&&(n=dt(n,this.bias.read())),null!=this.activation&&(n=this.activation.apply(n))),n}))}getConfig(){const t={units:this.units,activation:Ys(this.activation),useBias:this.useBias,kernelInitializer:Rt(this.kernelInitializer),biasInitializer:Rt(this.biasInitializer),kernelRegularizer:ni(this.kernelRegularizer),biasRegularizer:ni(this.biasRegularizer),activityRegularizer:ni(this.activityRegularizer),kernelConstraint:me(this.kernelConstraint),biasConstraint:me(this.biasConstraint)},e=super.getConfig();return Object.assign(t,e),t}}Ki.className="Dense",e.serialization.registerClass(Ki);class Gi extends Xt{constructor(t){super(t=t||{}),this.inputSpec=[{minNDim:3}],this.dataFormat=t.dataFormat}computeOutputShape(t){t=Ut(t);for(const e of t.slice(1))if(null==e)throw new a(`The shape of the input to "Flatten" is not fully defined (got ${t.slice(1)}). Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[t[0],Z(t,1)]}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s);let n=Wt(t);if("channelsFirst"===this.dataFormat&&n.rank>1){const t=[0];for(let e=2;e<n.rank;++e)t.push(e);t.push(1),n=e.transpose(n,t)}return function(t){if(t.rank<=1)throw new a(`batchFlatten requires a minimum rank of 2. 
Got rank: ${t.rank}.`);const e=[t.shape[0],Z(t.shape,1)];return i.reshape(t,e)}(n)}))}getConfig(){const t={};null!=this.dataFormat&&(t.dataFormat=this.dataFormat);const e=super.getConfig();return Object.assign(t,e),t}}Gi.className="Flatten",e.serialization.registerClass(Gi);class Yi extends Xt{constructor(t){super(t),this.supportsMasking=!0,this.activation=Qs(t.activation)}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s);const e=Wt(t);return this.activation.apply(e)}))}getConfig(){const t={activation:Ys(this.activation)},e=super.getConfig();return Object.assign(t,e),t}}Yi.className="Activation",e.serialization.registerClass(Yi);class Xi extends Xt{constructor(t){super(t),this.n=t.n,this.inputSpec=[{ndim:2}]}computeOutputShape(t){return[t[0],this.n,t[1]]}call(t,s){return e.tidy((()=>{return t=Wt(t),s=t,i=this.n,e.tidy((()=>{if(2!==s.shape.length)throw new a(`repeat() expects a rank-2 tensor, but received a rank-${s.shape.length} tensor.`);return ot(et(s,1),[1,i,1])}));var s,i}))}getConfig(){const t={n:this.n},e=super.getConfig();return Object.assign(t,e),t}}Xi.className="RepeatVector",e.serialization.registerClass(Xi);class Qi extends Xt{constructor(t){super(t),this.targetShape=t.targetShape;for(let t=0;t<this.targetShape.length;++t)this.isUnknown(this.targetShape[t])&&(this.targetShape[t]=null)}isUnknown(t){return t<0||null==t}fixUnknownDimension(t,e){const s="Total size of new array must be unchanged.",i=e.slice();let n=1,r=null;for(let t=0;t<i.length;++t){const e=i[t];if(this.isUnknown(e)){if(null!==r)throw new a("Can only specify one unknown dimension.");r=t}else n*=e}const o=Z(t);if(null!==r){if(0===n||o%n!=0)throw new a(s);i[r]=o/n}else if(o!==n)throw new a(s);return i}computeOutputShape(t){let e=!1;for(let s=0;s<t.length;++s)if(this.isUnknown(t[s])){e=!0;break}return e?t.slice(0,1).concat(this.targetShape):t.slice(0,1).concat(this.fixUnknownDimension(t.slice(1),this.targetShape))}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s);const i=Wt(t),n=i.shape,r=n.slice(0,1).concat(this.fixUnknownDimension(n.slice(1),this.targetShape));return e.reshape(i,r)}))}getConfig(){const t={targetShape:this.targetShape},e=super.getConfig();return Object.assign(t,e),t}}Qi.className="Reshape",e.serialization.registerClass(Qi);class tn extends Xt{constructor(t){if(super(t),null==t.dims)throw new Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(t.dims))throw new Error(`Permute constructor requires \`dims\` to be an Array, but received ${t.dims} instead.`);const s=Y(1,t.dims.length+1);if(!e.util.arraysEqual(t.dims.slice().sort(),s))throw new Error("Invalid permutation `dims`: "+JSON.stringify(t.dims)+" `dims` must contain consecutive integers starting from 1.");this.dims=t.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new Ht({ndim:this.dims.length+1})]}computeOutputShape(t){const e=(t=Ut(t)).slice();return this.dims.forEach(((s,i)=>{e[i+1]=t[s]})),e}call(t,s){return e.transpose(Wt(t),this.dimsIncludingBatch)}getConfig(){const t={dims:this.dims},e=super.getConfig();return Object.assign(t,e),t}}tn.className="Permute",e.serialization.registerClass(tn);class en extends Xt{constructor(t){super(null==t?{}:t),this.supportsMasking=!0,this.maskValue=null!=t?null==t.maskValue?0:t.maskValue:0}computeOutputShape(t){return t}getConfig(){const t=super.getConfig(),e={maskValue:this.maskValue};return Object.assign(e,t),e}computeMask(t,s){const i=Wt(t);return e.any(e.notEqual(i,this.maskValue),-1)}call(t,s){return 
e.tidy((()=>{this.invokeCallHook(t,s);const i=Wt(t),n=e.any(e.notEqual(i,this.maskValue),-1,!0);return e.mul(i,e.cast(n,i.dtype))}))}}en.className="Masking",e.serialization.registerClass(en);class sn extends Xt{constructor(t){if(super(t),this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",null==t.batchInputShape&&null==t.inputShape){let e=null;null!=t.batchSize&&(e=t.batchSize),null==t.inputLength?this.batchInputShape=[e,null]:this.batchInputShape=[e].concat(g(t.inputLength))}this.inputDim=t.inputDim,A(this.inputDim,"inputDim"),this.outputDim=t.outputDim,A(this.outputDim,"outputDim"),this.embeddingsInitializer=_t(t.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=ai(t.embeddingsRegularizer),this.activityRegularizer=ai(t.activityRegularizer),this.embeddingsConstraint=be(t.embeddingsConstraint),this.maskZero=t.maskZero,this.supportsMasking=t.maskZero,this.inputLength=t.inputLength}build(t){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(t){}computeMask(t,s){return e.tidy((()=>this.maskZero?(t=Wt(t),e.notEqual(t,e.zerosLike(t))):null))}computeOutputShape(t){if(t=Ut(t),null==this.inputLength)return[...t,this.outputDim];const e=g(this.inputLength);if(e.length!==t.length-1)throw new a(`"inputLength" is ${this.inputLength}, but received input shape has shape ${t}`);{let s=0;for(let i=0;i<e.length;++i){const n=e[i],r=t[i+1];if(null!=n&&null!=r&&n!==r)throw new a(`"inputLength" is ${this.inputLength}, but received input shape has shape ${t}`);null==n&&(e[s]=r),s++}}return[t[0],...e,this.outputDim]}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s);let i=Wt(t);"int32"!==i.dtype&&(i=tt(i,"int32"));const n=ht(this.embeddings.read(),e.reshape(i,[i.size]));return e.reshape(n,Ut(this.computeOutputShape(i.shape)))}))}getConfig(){const t={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:Rt(this.embeddingsInitializer),embeddingsRegularizer:ni(this.embeddingsRegularizer),activityRegularizer:ni(this.activityRegularizer),embeddingsConstraint:me(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength},e=super.getConfig();return Object.assign(t,e),t}}sn.className="Embedding",e.serialization.registerClass(sn);class nn extends Xt{constructor(t){super(t||{}),this.supportsMasking=!0}mergeFunction(t){throw new o}computeElementwiseOpOutputShape(t,e){if(null==t||null==e)return null;if(t.length<e.length)return this.computeElementwiseOpOutputShape(e,t);if(0===e.length)return t;const s=t.slice(0,t.length-e.length);for(let i=0;i<e.length;++i){const n=t[t.length-e.length+i],r=e[i];if(null==n||null==r||n<0||r<0)s.push(null);else if(1===n)s.push(r);else if(1===r)s.push(n);else{if(n!==r)throw new a("Operands could not be broadcast together with shapes "+JSON.stringify(t)+" "+JSON.stringify(e));s.push(n)}}return s}build(t){if(Array.isArray(t)&&!Array.isArray(t[0])&&(t=[Ut(t)]),t.length<2)throw new a(`A merge layer should be called on an Array of at least 2 inputs. Got ${t.length} input(s).`);let e=[];for(const s of t)null!=s&&null!==s[0]&&e.push(s[0]);if(e=S(e),e.length>1)throw new a(`Can not merge tensors with different batch sizes. 
Got tensors with shapes: ${JSON.stringify(t)}.`);let s=null==t[0]?null:t[0].slice(1);for(let e=1;e<t.length;++e){const i=null==t[e]?null:t[e].slice(1);s=this.computeElementwiseOpOutputShape(s,i)}const i=t.map((t=>t.length));-1===t.indexOf(null)&&1===S(i).length?this.reshapeRequired=!1:this.reshapeRequired=!0}call(t,s){return e.tidy((()=>{if(this.reshapeRequired){const e=[],s=t.map((t=>t.rank));if(-1===s.indexOf(null)){const i=G(s);for(let s of t){const t=s.rank;for(let e=0;e<i-t;++e)s=et(s,1);e.push(s)}return this.mergeFunction(e)}{let s=!1;for(const n of t){const t=n.rank;if(null==t){const t=n.shape,r=t[0],a=t.slice(1).concat([r]);let o=i.reshape(n,[r].concat(Z(t.slice(1))));o=i.transpose(o,[1,0]),o=i.reshape(o,a),e.push(o),s=!0}else if(t>1){const r=Y(1,t).concat([0]);e.push(i.transpose(n,r)),s=!0}else e.push(n)}let n=this.mergeFunction(e);const r=n.rank;if(s)if(null==r){const t=n.shape,e=t[t.length-1],s=[e].concat(t.slice(0,t.length-1));n=i.reshape(i.transpose(i.reshape(n,[-1,e]),[1,0]),s)}else if(r>1){const t=[r-1].concat(Y(0,r-1));n=i.transpose(n,t)}return n}}return this.mergeFunction(t)}))}computeOutputShape(t){let e;e=null==t[0]?null:t[0].slice(1);for(let s=1;s<t.length;++s){const i=null==t[s]?null:t[s].slice(1);e=this.computeElementwiseOpOutputShape(e,i)}let s=[];for(const e of t)null!=e&&null!==e[0]&&s.push(e[0]);return s=S(s),e=1===s.length?s.concat(e):[null].concat(e),e}computeMask(t,e){return i.tidy((()=>{if(null==e)return null;if(!Array.isArray(e))throw new a("`mask` should be an Array");if(!Array.isArray(t))throw new a("`inputs` should be an Array");if(e.length!==t.length)throw new a(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${t.length} vs ${e.length})`);if(e.every((t=>null==t)))return null;let s=(e=e.map((t=>null==t?t:i.expandDims(t,0))))[0];for(let t=1;t<e.length;++t)s=i.logicalAnd(s,e[t]);return s}))}}class rn extends nn{constructor(t){super(t)}mergeFunction(t){return e.tidy((()=>{let e=t[0].clone();for(let s=1;s<t.length;++s)e=i.add(e,t[s]);return e}))}}rn.className="Add",e.serialization.registerClass(rn);class an extends nn{constructor(t){super(t)}mergeFunction(t){return e.tidy((()=>{let e=t[0].clone();for(let s=1;s<t.length;++s)e=i.mul(e,t[s]);return e}))}}an.className="Multiply",e.serialization.registerClass(an);class on extends nn{constructor(t){super(t)}mergeFunction(t){return e.tidy((()=>{let e=t[0].clone();for(let s=1;s<t.length;++s)e=i.add(e,t[s]);return i.mul(1/t.length,e)}))}}on.className="Average",e.serialization.registerClass(on);class ln extends nn{constructor(t){super(t)}mergeFunction(t){return e.tidy((()=>{let e=t[0];for(let s=1;s<t.length;++s)e=i.maximum(e,t[s]);return e}))}}ln.className="Maximum",e.serialization.registerClass(ln);class un extends nn{constructor(t){super(t)}mergeFunction(t){return e.tidy((()=>{let e=t[0];for(let s=1;s<t.length;++s)e=i.minimum(e,t[s]);return e}))}}un.className="Minimum",e.serialization.registerClass(un);class hn extends nn{constructor(t){super(t),this.DEFAULT_AXIS=-1,null==t&&(t={}),this.axis=null==t.axis?this.DEFAULT_AXIS:t.axis,this.supportsMasking=!0,this.reshapeRequired=!1}build(t){if(!Array.isArray(t)||!Array.isArray(t[0])||1===t.length)throw new a("A `Concatenate` layer should be called on a list of at least 2 inputs");let s=!0;for(const e of t)if(null!=e){s=!1;break}if(s)return;const i=[];for(let s=0;s<t.length;++s){const n=t[s].slice();n.splice(this.axis,1);let r=!1;for(const t of i)if(e.util.arraysEqual(t,n)){r=!0;break}r||i.push(n)}if(i.length>1)throw new 
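/* Concatenate requires all input shapes to agree everywhere except on the
concat axis; e.g. joining [batch, 4, 3] with [batch, 4, 5] on the last axis
yields [batch, 4, 8], which computeOutputShape below produces by summing the
axis entries (or null when any of them is unknown). */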
a("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. Got input shapes: "+JSON.stringify(t))}mergeFunction(t){return e.tidy((()=>rt(t,this.axis)))}computeOutputShape(t){if(!Array.isArray(t)||!Array.isArray(t[0]))throw new a("A `Concatenate` layer should be called on a list of inputs.");const e=t,s=e[0].slice(),i=this.axis<0?s.length+this.axis:this.axis;for(const t of e.slice(1)){if(null==s[i]||null==t[i]){s[i]=null;break}s[i]+=t[i]}return s}computeMask(t,e){if(null==e)return null;if(!Array.isArray(e))throw new a("`mask` should be an array for Concatenate");if(!Array.isArray(t))throw new a("`inputs` should be an array for Concatenate");if(e.length!==t.length)throw new a(`Mismatch in the length of mask (${e.length}) and the legnth of inputs (${t.length})`);return i.tidy((()=>{let s=!0;if(e.forEach((t=>{null==t||(s=!1)})),s)return null;const n=[];for(let s=0;s<t.length;++s)null==e[s]?n.push(i.cast(i.onesLike(t[s]),"bool")):e[s].rank<t[s].rank?n.push(i.expandDims(e[s],-1)):n.push(e[s]);const r=i.concat(n,this.axis);return i.all(r,-1,!1)}))}getConfig(){const t={axis:this.axis},e=super.getConfig();return Object.assign(t,e),t}}function cn(t,e){for(;t<0;)t+=e;return t}hn.className="Concatenate",e.serialization.registerClass(hn);class pn extends nn{constructor(t){super(t),this.axes=t.axes,this.normalize=null!=t.normalize&&t.normalize,this.supportsMasking=!0,this.reshapeRequired=!1}build(t){i.util.assert(Array.isArray(t)&&2===t.length&&Array.isArray(t[0])&&Array.isArray(t[1]),(()=>"A `Dot` layer should be called on a list of exactly 2 inputs."));const e=t[0],s=t[1];if(e.length>3||s.length>3)throw new o("Dot layer does not support tensors of 4D or higher rank yet.");const n=this.interpretAxes(e,s);if(e[n[0]]!==s[n[1]])throw new a(`Dimension incompatibility: ${e[n[0]]} !== ${s[n[1]]}`)}mergeFunction(t){if(2!==t.length)throw new a(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${t.length} input(s).`);let e,s=t[0],n=t[1];return e=Array.isArray(this.axes)?this.axes.map(((e,s)=>cn(e,t[s].shape.length))):[cn(this.axes,s.shape.length),cn(this.axes,n.shape.length)],this.normalize&&(s=$e(s,e[0]),n=$e(n,e[1])),function(t,e,s){if(t.shape.length>3||e.shape.length>3)throw new o("batchDot is not implemented for tensors of 4D or higher rank yet");if(i.util.assert(t.shape.length>=2,(()=>`batchDot requires the rank of x to be >= 2, but got ${t.shape.length}`)),i.util.assert(t.shape.length>=2,(()=>`batchDot requires the rank of y to be >= 2, but got ${e.shape.length}`)),"number"==typeof s&&(s=[s,s]),"complex64"===t.dtype||"complex64"===e.dtype)throw new o("batchDot is not implemented for complex64-type Tensors yet.");const n=t.shape.length,r=e.shape.length;null==s&&(s=[n-1,r-2]);const a=s;return i.tidy((()=>{let s,o;if(n>r){s=n-r;const t=[];for(let e=0;e<s;++e)t.push(1);e=i.reshape(e,e.shape.concat(t))}else if(r>n){s=r-n;const e=[];for(let t=0;t<s;++t)e.push(1);t=i.reshape(t,t.shape.concat(e))}else s=0;if(2===t.shape.length&&2===e.shape.length)o=a[0]===a[1]?i.sum(i.mul(t,e),a[0]):i.sum(i.mul(i.transpose(t,[1,0]),e),a[1]);else{const s=a[0]!==t.shape.length-1,n=a[1]===e.shape.length-1;o=i.matMul(t,e,s,n)}if(s>0){let t;t=n>r?n+r-3:n-1;const e=[];for(let i=t;i<t+s;++i)e.push(i);o=i.squeeze(o,e)}return 1===o.shape.length&&(o=i.expandDims(o,1)),o}))}(s,n,e)}interpretAxes(t,e){let s;return 
s=Array.isArray(this.axes)?this.axes:[cn(this.axes,t.length),cn(this.axes,e.length)],s}computeOutputShape(t){i.util.assert(Array.isArray(t)&&2===t.length&&Array.isArray(t[0])&&Array.isArray(t[1]),(()=>"A `Dot` layer should be called on a list of exactly 2 inputs."));const e=t[0].slice(),s=t[1].slice();if(e.length>3||s.length>3)throw new o("Dot layer does not support tensors of 4D or higher rank yet.");const n=this.interpretAxes(e,s);e.splice(n[0],1),s.splice(n[1],1),s.splice(0,1);const r=e.concat(s);return 1===r.length&&r.push(1),r}computeMask(t,e){return null}getConfig(){const t={axes:this.axes,normalize:this.normalize},e=super.getConfig();return Object.assign(t,e),t}}pn.className="Dot",e.serialization.registerClass(pn);class dn extends Xt{constructor(t){super(t),this.supportsMasking=!0,this.stddev=t.stddev}computeOutputShape(t){return t}getConfig(){const t=super.getConfig(),e={stddev:this.stddev};return Object.assign(e,t),e}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s);const i=Wt(t);return ft((()=>e.add(lt(i.shape,0,this.stddev),i)),(()=>i),s.training||!1)}))}}dn.className="GaussianNoise",e.serialization.registerClass(dn);class gn extends Xt{constructor(t){super(t),this.supportsMasking=!0,this.rate=t.rate}computeOutputShape(t){return t}getConfig(){const t=super.getConfig(),e={rate:this.rate};return Object.assign(e,t),e}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s);const i=Wt(t);if(this.rate>0&&this.rate<1){return ft((()=>{const t=Math.sqrt(this.rate/(1-this.rate));return e.mul(i,lt(i.shape,1,t))}),(()=>i),s.training||!1)}return i}))}}gn.className="GaussianDropout",e.serialization.registerClass(gn);class fn extends Xt{constructor(t){super(t),this.supportsMasking=!0,this.rate=t.rate,this.noiseShape=t.noiseShape}_getNoiseShape(t){return this.noiseShape||Wt(t).shape}computeOutputShape(t){return t}getConfig(){const t=super.getConfig(),e={rate:this.rate};return Object.assign(e,t),e}call(t,s){return e.tidy((()=>{if(this.rate<1&&this.rate>0){const i=this._getNoiseShape(t),n=()=>{const s=Wt(t),n=-1.7580993408473766;let r=e.greaterEqual(e.randomUniform(i),this.rate);r=tt(r,"float32");const a=((1-this.rate)*(1+this.rate*n**2))**-.5,o=-a*n*this.rate,l=e.add(e.mul(s,r),e.mul(e.add(r,-1),n));return e.add(e.mul(l,a),o)};return ft(n,(()=>Wt(t)),s.training||!1)}return t}))}}function mn(t,e,s,n,r,a=.001){let l;if(2===t.rank)l=i.batchNorm2d(t,e,s,n,r,a);else if(3===t.rank)l=i.batchNorm3d(t,e,s,n,r,a);else{if(4!==t.rank)throw new o(`batchNormalization is not implemented for array of rank ${t.rank} yet`);l=i.batchNorm4d(t,e,s,n,r,a)}return l}function yn(t,s,n,r,a=.001){return e.util.arraysEqual(r.slice().sort(),Y(0,t.rank-1))?function(t,s,n,r,a=.001){return e.tidy((()=>{const e=i.moments(t,r),o=e.mean,l=e.variance;return[mn(t,o,l,n,s,a),o,l]}))}(t,s,n,r,a):function(t,s,n,r,a=.001){return e.tidy((()=>{const o=i.moments(t,r),l=o.mean,u=o.variance,h=[];for(const e of Y(0,t.rank))-1!==r.indexOf(e)?h.push(1):h.push(t.shape[e]);const c=e.reshape(l,h),p=e.reshape(u,h),d=null==s?null:e.reshape(s,h),g=null==n?null:e.reshape(n,h);return[mn(t,c,p,g,d,a),l,u]}))}(t,s,n,r,a)}fn.className="AlphaDropout",e.serialization.registerClass(fn);class bn extends 
Xt{constructor(t){null==t&&(t={}),super(t),this.supportsMasking=!0,this.axis=null==t.axis?-1:t.axis,this.momentum=null==t.momentum?.99:t.momentum,this.epsilon=null==t.epsilon?.001:t.epsilon,this.center=null==t.center||t.center,this.scale=null==t.scale||t.scale,this.betaInitializer=_t(t.betaInitializer||"zeros"),this.gammaInitializer=_t(t.gammaInitializer||"ones"),this.movingMeanInitializer=_t(t.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=_t(t.movingVarianceInitializer||"ones"),this.betaConstraint=be(t.betaConstraint),this.gammaConstraint=be(t.gammaConstraint),this.betaRegularizer=ai(t.betaRegularizer),this.gammaRegularizer=ai(t.gammaRegularizer)}build(t){t=Ut(t);const e=this.axis>=0?this.axis:this.axis+t.length,s=t[e];if(null==s)throw new a(`Axis ${e} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(t)}.`);this.inputSpec=[new Ht({ndim:t.length,axes:{[e]:s}})];const i=[s];this.scale&&(this.gamma=this.addWeight("gamma",i,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",i,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",i,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",i,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(t,s){return e.tidy((()=>{const n=null!=s.training&&s.training,r=Wt(t),a=r.shape,o=a.length,l=Y(0,o),u=this.axis>=0?this.axis:this.axis+o;l.splice(u,1);const c=h(1,o);c[u]=a[u];const p=l.slice();p.sort();const d=!e.util.arraysEqual(p,Y(0,o).slice(0,o-1));if(!n)return(()=>{if(d){const t=e.reshape(this.movingMean.read(),c),s=e.reshape(this.movingVariance.read(),c),i=this.center?e.reshape(this.beta.read(),c):null,n=this.scale?e.reshape(this.gamma.read(),c):null;return mn(r,t,s,i,n,this.epsilon)}return mn(r,this.movingMean.read(),this.movingVariance.read(),null==this.beta?null:this.beta.read(),null==this.gamma?null:this.gamma.read(),this.epsilon)})();const[g,f,m]=yn(r,this.gamma.read(),this.beta.read(),l,this.epsilon),y=(t,e,s)=>{i.tidy((()=>{const n=1-s,r=t.read(),a=i.mul(i.sub(r,e),n);t.write(i.sub(r,a))}))};return(()=>{y(this.movingMean,f,this.momentum),y(this.movingVariance,m,this.momentum)})(),g}))}getConfig(){const t={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:Rt(this.betaInitializer),gammaInitializer:Rt(this.gammaInitializer),movingMeanInitializer:Rt(this.movingMeanInitializer),movingVarianceInitializer:Rt(this.movingVarianceInitializer),betaRegularizer:ni(this.betaRegularizer),gammaRegularizer:ni(this.gammaRegularizer),betaConstraint:me(this.betaConstraint),gammaConstraint:me(this.gammaConstraint)},e=super.getConfig();return Object.assign(t,e),t}}bn.className="BatchNormalization",e.serialization.registerClass(bn);class wn extends Xt{constructor(t){if(null==t&&(t={}),super(t),this.axis=null==t.axis?-1:t.axis,"number"==typeof this.axis){if(!Number.isInteger(this.axis))throw new Error(`Expected axis to be an integer, but received ${this.axis}`)}else{if(!Array.isArray(this.axis))throw new Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);for(const t of this.axis)if(!Number.isInteger(t))throw new Error(`Expected axis to be an array of integers, but received 
${JSON.stringify(this.axis)}`)}this.epsilon=null==t.epsilon?.001:t.epsilon,this.center=null==t.center||t.center,this.scale=null==t.scale||t.scale,this.betaInitializer=_t(t.betaInitializer||"zeros"),this.gammaInitializer=_t(t.gammaInitializer||"ones"),this.betaRegularizer=ai(t.betaRegularizer),this.gammaRegularizer=ai(t.gammaRegularizer),this.supportsMasking=!0}build(t){const e=(t=Ut(t)).length;"number"==typeof this.axis&&(this.axis=[this.axis]);for(let t=0;t<this.axis.length;++t)this.axis[t]<0&&(this.axis[t]+=e);for(const t of this.axis)if(t<0||t>=e)throw new Error(`Invalid axis: ${t}`);if(this.axis.length!==S(this.axis).length)throw new Error(`Found duplicate axes in: ${this.axis}`);const s=this.axis.map((e=>t[e]));this.scale?this.gamma=this.addWeight("gamma",s,"float32",this.gammaInitializer,this.gammaRegularizer,true):this.gamma=null,this.center?this.beta=this.addWeight("beta",s,"float32",this.betaInitializer,this.betaRegularizer,true):this.beta=null,this.built=!0}call(t,s){const n=Wt(t),r=n.shape,a=r.length;return e.tidy((()=>{let{mean:t,variance:s}=e.moments(n,this.axis,!0);const o=h(1,a);for(const t of this.axis)o[t]=r[t];const l=t=>null!=t&&t.shape.length!==a?i.reshape(t,o):t;let u=this.scale?l(this.gamma.read()):null,c=this.center?l(this.beta.read()):null;const p=[],d=[];for(let t=0;t<a;++t)-1!==this.axis.indexOf(t)?(p.push(r[t]),d.push(1)):(p.push(1),d.push(r[t]));return t=i.tile(t,p),s=i.tile(s,p),null!=u&&(u=i.tile(u,d)),null!=c&&(c=i.tile(c,d)),mn(n,t,s,c,u,this.epsilon)}))}getConfig(){const t={axis:this.axis,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:Rt(this.betaInitializer),gammaInitializer:Rt(this.gammaInitializer),betaRegularizer:ni(this.betaRegularizer),gammaRegularizer:ni(this.gammaRegularizer)},e=super.getConfig();return Object.assign(t,e),t}}wn.className="LayerNormalization",e.serialization.registerClass(wn);class zn extends Xt{constructor(t){if(null==t&&(t={}),super(t),this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,null==t.padding)this.padding=[[1,1],[1,1]];else if("number"==typeof t.padding)this.padding=[[t.padding,t.padding],[t.padding,t.padding]];else{if(t.padding=t.padding,2!==t.padding.length)throw new a(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${t.padding.length} array.`);let e,s;if("number"==typeof t.padding[0])e=[t.padding[0],t.padding[0]],s=[t.padding[1],t.padding[1]];else{if(t.padding=t.padding,2!==t.padding[0].length)throw new a(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${t.padding[0].length} array.`);if(e=t.padding[0],2!==t.padding[1].length)throw new a(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${t.padding[1].length} array.`);s=t.padding[1]}this.padding=[e,s]}this.inputSpec=[new Ht({ndim:4})]}computeOutputShape(t){let e,s;return t=Ut(t),"channelsFirst"===this.dataFormat?(e=null!=t[2]&&t[2]>=0?t[2]+this.padding[0][0]+this.padding[0][1]:null,s=null!=t[3]&&t[3]>=0?t[3]+this.padding[1][0]+this.padding[1][1]:null,[t[0],t[1],e,s]):(e=null!=t[1]&&t[1]>=0?t[1]+this.padding[0][0]+this.padding[0][1]:null,s=null!=t[2]&&t[2]>=0?t[2]+this.padding[1][0]+this.padding[1][1]:null,[t[0],e,s,t[3]])}call(t,s){return e.tidy((()=>{return s=Wt(t),n=this.padding,r=this.dataFormat,e.tidy((()=>{if(4!==s.rank)throw new a(`spatial2dPadding expects input tensor to be 4-D, but received a ${s.rank}-D tensor.`);if(null==n&&(n=[[1,1],[1,1]]),2!==n.length||2!==n[0].length||2!==n[1].length)throw new a("spatial2dPadding expects 
`padding` to be an Array of two Arrays, each of which is an Array of two integers.");if(null==r&&(r="channelsLast"),"channelsLast"!==r&&"channelsFirst"!==r)throw new a(`Unknown data format: ${r}. Supported data formats are 'channelsLast' and 'channelsFirst'.`);let t;return t="channelsFirst"===r?[[0,0],[0,0],n[0],n[1]]:[[0,0],n[0],n[1],[0,0]],i.pad(s,t)}));var s,n,r}))}getConfig(){const t={padding:this.padding,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}function kn(t,s,n,r,a,o){return e.tidy((()=>{let e;B(a),U(o),W(r),null==n&&(n=[1,1]),null==r&&(r="valid"),null==a&&(a="channelsLast"),null==o&&(o="max"),t=mi(t,a);const l="same"===r?"same":"valid";return e="max"===o?i.maxPool(t,s,n,l):i.avgPool(t,s,n,l),"channelsFirst"===a&&(e=i.transpose(e,[0,3,1,2])),e}))}function Sn(t,s,n,r,a,o){return e.tidy((()=>{let e;B(a),U(o),W(r),null==n&&(n=[1,1,1]),null==r&&(r="valid"),null==a&&(a="channelsLast"),null==o&&(o="max"),t=yi(t,a);const l="same"===r?"same":"valid";return e="max"===o?i.maxPool3d(t,s,n,l):i.avgPool3d(t,s,n,l),"channelsFirst"===a&&(e=i.transpose(e,[0,4,1,2,3])),e}))}zn.className="ZeroPadding2D",e.serialization.registerClass(zn);class vn extends Xt{constructor(t){if(null==t.poolSize&&(t.poolSize=2),super(t),"number"==typeof t.poolSize)this.poolSize=[t.poolSize];else{if(!Array.isArray(t.poolSize)||1!==t.poolSize.length||"number"!=typeof t.poolSize[0])throw new a(`poolSize for 1D pooling layer must be a number or an Array of a single number, but received ${JSON.stringify(t.poolSize)}`);this.poolSize=t.poolSize}if(A(this.poolSize,"poolSize"),null==t.strides)this.strides=this.poolSize;else if("number"==typeof t.strides)this.strides=[t.strides];else{if(!Array.isArray(t.strides)||1!==t.strides.length||"number"!=typeof t.strides[0])throw new a(`strides for 1D pooling layer must be a number or an Array of a single number, but received ${JSON.stringify(t.strides)}`);this.strides=t.strides}A(this.strides,"strides"),this.padding=null==t.padding?"valid":t.padding,W(this.padding),this.inputSpec=[new Ht({ndim:3})]}computeOutputShape(t){const e=gi((t=Ut(t))[1],this.poolSize[0],this.padding,this.strides[0]);return[t[0],e,t[2]]}call(t,s){return e.tidy((()=>{this.invokeCallHook(t,s),t=et(Wt(t),2);const e=this.poolingFunction(Wt(t),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast");return i.squeeze(e,[2])}))}getConfig(){const t={poolSize:this.poolSize,padding:this.padding,strides:this.strides},e=super.getConfig();return Object.assign(t,e),t}}class Nn extends vn{constructor(t){super(t)}poolingFunction(t,e,s,i,n){return B(n),W(i),kn(t,e,s,i,n,"max")}}Nn.className="MaxPooling1D",e.serialization.registerClass(Nn);class Cn extends vn{constructor(t){super(t)}poolingFunction(t,e,s,i,n){return B(n),W(i),kn(t,e,s,i,n,"avg")}}Cn.className="AveragePooling1D",e.serialization.registerClass(Cn);class An extends Xt{constructor(t){if(null==t.poolSize&&(t.poolSize=[2,2]),super(t),this.poolSize=Array.isArray(t.poolSize)?t.poolSize:[t.poolSize,t.poolSize],null==t.strides)this.strides=this.poolSize;else if(Array.isArray(t.strides)){if(2!==t.strides.length)throw new a(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${t.strides.length}.`);this.strides=t.strides}else 
this.strides=[t.strides,t.strides];A(this.poolSize,"poolSize"),A(this.strides,"strides"),this.padding=null==t.padding?"valid":t.padding,this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,B(this.dataFormat),W(this.padding),this.inputSpec=[new Ht({ndim:4})]}computeOutputShape(t){t=Ut(t);let e="channelsFirst"===this.dataFormat?t[2]:t[1],s="channelsFirst"===this.dataFormat?t[3]:t[2];return e=gi(e,this.poolSize[0],this.padding,this.strides[0]),s=gi(s,this.poolSize[1],this.padding,this.strides[1]),"channelsFirst"===this.dataFormat?[t[0],t[1],e,s]:[t[0],e,s,t[3]]}call(t,s){return e.tidy((()=>(this.invokeCallHook(t,s),this.poolingFunction(Wt(t),this.poolSize,this.strides,this.padding,this.dataFormat))))}getConfig(){const t={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}class xn extends An{constructor(t){super(t)}poolingFunction(t,e,s,i,n){return B(n),W(i),kn(t,e,s,i,n,"max")}}xn.className="MaxPooling2D",e.serialization.registerClass(xn);class In extends An{constructor(t){super(t)}poolingFunction(t,e,s,i,n){return B(n),W(i),kn(t,e,s,i,n,"avg")}}In.className="AveragePooling2D",e.serialization.registerClass(In);class Ln extends Xt{constructor(t){if(null==t.poolSize&&(t.poolSize=[2,2,2]),super(t),this.poolSize=Array.isArray(t.poolSize)?t.poolSize:[t.poolSize,t.poolSize,t.poolSize],null==t.strides)this.strides=this.poolSize;else if(Array.isArray(t.strides)){if(3!==t.strides.length)throw new a(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${t.strides.length}.`);this.strides=t.strides}else this.strides=[t.strides,t.strides,t.strides];A(this.poolSize,"poolSize"),A(this.strides,"strides"),this.padding=null==t.padding?"valid":t.padding,this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,B(this.dataFormat),W(this.padding),this.inputSpec=[new Ht({ndim:5})]}computeOutputShape(t){t=Ut(t);let e="channelsFirst"===this.dataFormat?t[2]:t[1],s="channelsFirst"===this.dataFormat?t[3]:t[2],i="channelsFirst"===this.dataFormat?t[4]:t[3];return e=gi(e,this.poolSize[0],this.padding,this.strides[0]),s=gi(s,this.poolSize[1],this.padding,this.strides[1]),i=gi(i,this.poolSize[2],this.padding,this.strides[2]),"channelsFirst"===this.dataFormat?[t[0],t[1],e,s,i]:[t[0],e,s,i,t[4]]}call(t,s){return e.tidy((()=>(this.invokeCallHook(t,s),this.poolingFunction(Wt(t),this.poolSize,this.strides,this.padding,this.dataFormat))))}getConfig(){const t={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}class Tn extends Ln{constructor(t){super(t)}poolingFunction(t,e,s,i,n){return B(n),W(i),Sn(t,e,s,i,n,"max")}}Tn.className="MaxPooling3D",e.serialization.registerClass(Tn);class En extends Ln{constructor(t){super(t)}poolingFunction(t,e,s,i,n){return B(n),W(i),Sn(t,e,s,i,n,"avg")}}En.className="AveragePooling3D",e.serialization.registerClass(En);class Dn extends Xt{constructor(t){super(t),this.inputSpec=[new Ht({ndim:3})]}computeOutputShape(t){return[t[0],t[2]]}call(t,e){throw new o}}class $n extends Dn{constructor(t){super(t||{})}call(t,s){return e.tidy((()=>{const e=Wt(t);return i.mean(e,1)}))}}$n.className="GlobalAveragePooling1D",e.serialization.registerClass($n);class Fn extends Dn{constructor(t){super(t||{})}call(t,s){return e.tidy((()=>{const e=Wt(t);return i.max(e,1)}))}}Fn.className="GlobalMaxPooling1D",e.serialization.registerClass(Fn);class On extends 
Xt{constructor(t){super(t),this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,B(this.dataFormat),this.inputSpec=[new Ht({ndim:4})]}computeOutputShape(t){return"channelsLast"===this.dataFormat?[t[0],t[3]]:[t[0],t[1]]}call(t,e){throw new o}getConfig(){const t={dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}class Rn extends On{call(t,s){return e.tidy((()=>{const e=Wt(t);return"channelsLast"===this.dataFormat?i.mean(e,[1,2]):i.mean(e,[2,3])}))}}Rn.className="GlobalAveragePooling2D",e.serialization.registerClass(Rn);class _n extends On{call(t,s){return e.tidy((()=>{const e=Wt(t);return"channelsLast"===this.dataFormat?i.max(e,[1,2]):i.max(e,[2,3])}))}}_n.className="GlobalMaxPooling2D",e.serialization.registerClass(_n);class Mn extends Xt{constructor(t){super(t),this.layer=t.layer}build(t){this.built=!0}get trainable(){return null!=this.layer&&this.layer.trainable}set trainable(t){null!=this.layer&&(this.layer.trainable=t)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(t){this.layer.setWeights(t)}getConfig(){const t={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}},e=super.getConfig();return Object.assign(t,e),t}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),null!=this.layer&&this.layer.setFastWeightInitDuringBuild(t)}static fromConfig(t,e,s={}){const i=De(e.layer,s);delete e.layer;const n={layer:i};return Object.assign(n,e),new t(n)}}class Bn extends Mn{constructor(t){super(t),this.supportsMasking=!0}build(t){if((t=Ut(t)).length<3)throw new a(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(t)}`);this.inputSpec=[{shape:t}];const e=[t[0]].concat(t.slice(2));this.layer.built||(this.layer.build(e),this.layer.built=!0),super.build(t)}computeOutputShape(t){const e=[(t=Ut(t))[0]].concat(t.slice(2)),s=this.layer.computeOutputShape(e),i=t[1];return[s[0],i].concat(s.slice(1))}call(t,s){return e.tidy((()=>Di(((t,e)=>[Wt(this.layer.call(t,s)),[]]),t=Wt(t),[],!1,null,null,!1,!0)[1]))}}Bn.className="TimeDistributed",e.serialization.registerClass(Bn);class Wn extends Mn{constructor(t){super(t);const e=t.layer.getConfig(),s={};s.className=t.layer.getClassName(),s.config=e,this.forwardLayer=De(s),e.goBackwards=!0!==e.goBackwards;const i={};var n;if(i.className=t.layer.getClassName(),i.config=e,this.backwardLayer=De(i),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=void 0===t.mergeMode?"concat":t.mergeMode,n=this.mergeMode,N(_,"BidirectionalMergeMode",n),t.weights)throw new o("weights support is not implemented for Bidirectional layer yet.");this._stateful=t.layer.stateful,this.returnSequences=t.layer.returnSequences,this.returnState=t.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=t.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(t){this._trainable=t,null!=this.forwardLayer&&(this.forwardLayer.trainable=t),null!=this.backwardLayer&&(this.backwardLayer.trainable=t)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(t){const e=t.length,s=Math.floor(e/2);this.forwardLayer.setWeights(t.slice(0,s)),this.backwardLayer.setWeights(t.slice(s))}computeOutputShape(t){let 
e,s,i,n=this.forwardLayer.computeOutputShape(t);return Array.isArray(n)&&Array.isArray(n[0])||(n=[n]),this.returnState?(i=n.slice(1),e=n[0]):e=n[0],"concat"===this.mergeMode?(e[e.length-1]*=2,s=[e]):s=null==this.mergeMode?[e,e.slice()]:[e],this.returnState?null==this.mergeMode?s.concat(i).concat(i.slice()):[e].concat(i).concat(i.slice()):d(s)}apply(t,e){let s=null==e?null:e.initialState,i=null==e?null:e.constants;null==e&&(e={});const n=Ei(t,s,i,this.numConstants);if(t=n.inputs,s=n.initialState,i=n.constants,Array.isArray(t)&&(s=t.slice(1),t=t[0]),(null==s||0===s.length)&&null==i)return super.apply(t,e);const r=[],l=[];if(null!=s){const t=s.length;if(t%2>0)throw new a("When passing `initialState` to a Bidirectional RNN, the state should be an Array containing the states of the underlying RNNs.");e.initialState=s,r.push(...s);const i=s.map((t=>new Ht({shape:t.shape})));this.forwardLayer.stateSpec=i.slice(0,t/2),this.backwardLayer.stateSpec=i.slice(t/2),l.push(...i)}if(null!=i)throw new o("Support for constants in Bidirectional layers is not implemented yet.");const u=r[0]instanceof Zt;for(const t of r)if(t instanceof Zt!==u)throw new a("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(u){const s=[t].concat(r),i=this.inputSpec.concat(l),n=this.inputSpec;this.inputSpec=i;const a=super.apply(s,e);return this.inputSpec=n,a}return super.apply(t,e)}call(t,s){return e.tidy((()=>{const e=s.initialState;let n,r,a,o;if(null==e)n=this.forwardLayer.call(t,s),r=this.backwardLayer.call(t,s);else{const i=e.slice(0,e.length/2),a=e.slice(e.length/2);n=this.forwardLayer.call(t,Object.assign(s,{initialState:i})),r=this.backwardLayer.call(t,Object.assign(s,{initialState:a}))}return this.returnState&&(Array.isArray(n)&&(a=n.slice(1).concat(r.slice(1))),n=n[0],r=r[0]),this.returnSequences&&(r=i.reverse(r,1)),"concat"===this.mergeMode?o=rt([n,r]):"sum"===this.mergeMode?o=i.add(n,r):"ave"===this.mergeMode?o=i.mul(.5,i.add(n,r)):"mul"===this.mergeMode?o=i.mul(n,r):null==this.mergeMode&&(o=[n,r]),this.returnState?null==this.mergeMode?o.concat(a):[o].concat(a):o}))}resetStates(t){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(t){j(this.forwardLayer.name,(()=>{this.forwardLayer.build(t)})),j(this.backwardLayer.name,(()=>{this.backwardLayer.build(t)})),this.built=!0}computeMask(t,e){let s;if(Array.isArray(e)&&(e=e[0]),s=this.returnSequences?null==this.mergeMode?[e,e]:e:null==this.mergeMode?[null,null]:null,this.returnState){const t=this.forwardLayer.states.map((t=>null));return Array.isArray(s)?s.concat(t).concat(t):[s].concat(t).concat(t)}return s}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),null!=this.forwardLayer&&this.forwardLayer.setFastWeightInitDuringBuild(t),null!=this.backwardLayer&&this.backwardLayer.setFastWeightInitDuringBuild(t)}getConfig(){const t={mergeMode:this.mergeMode},e=super.getConfig();return Object.assign(t,e),t}static fromConfig(t,e){const s=De(e.layer);if(delete e.layer,null!=e.numConstants)throw new o("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");const i=e;return i.layer=s,new t(i)}}function Un(t){return new Cn(t)}function Pn(t){return new In(t)}function jn(t){return new En(t)}function Vn(t){return new 
Fn(t)}function qn(t){return new _n(t)}function Jn(t){return new Nn(t)}function Hn(t){return new xn(t)}Wn.className="Bidirectional",e.serialization.registerClass(Wn);var Zn={__proto__:null,inputLayer:function(t){return new te(t)},elu:function(t){return new hi(t)},reLU:function(t){return new oi(t)},leakyReLU:function(t){return new li(t)},prelu:function(t){return new ui(t)},softmax:function(t){return new pi(t)},thresholdedReLU:function(t){return new ci(t)},conv1d:function(t){return new xi(t)},conv2d:function(t){return new ki(t)},conv2dTranspose:function(t){return new vi(t)},conv3d:function(t){return new Si(t)},conv3dTranspose:function(t){return new Ni(t)},separableConv2d:function(t){return new Ai(t)},cropping2D:function(t){return new Ii(t)},upSampling2d:function(t){return new Li(t)},depthwiseConv2d:function(t){return new Ti(t)},activation:function(t){return new Yi(t)},dense:function(t){return new Ki(t)},dropout:function(t){return new Hi(t)},spatialDropout1d:function(t){return new Zi(t)},flatten:function(t){return new Gi(t)},repeatVector:function(t){return new Xi(t)},reshape:function(t){return new Qi(t)},permute:function(t){return new tn(t)},embedding:function(t){return new sn(t)},add:function(t){return new rn(t)},average:function(t){return new on(t)},concatenate:function(t){return new hn(t)},maximum:function(t){return new ln(t)},minimum:function(t){return new un(t)},multiply:function(t){return new an(t)},dot:function(t){return new pn(t)},batchNormalization:function(t){return new bn(t)},layerNormalization:function(t){return new wn(t)},zeroPadding2d:function(t){return new zn(t)},averagePooling1d:Un,avgPool1d:function(t){return Un(t)},avgPooling1d:function(t){return Un(t)},averagePooling2d:Pn,avgPool2d:function(t){return Pn(t)},avgPooling2d:function(t){return Pn(t)},averagePooling3d:jn,avgPool3d:function(t){return jn(t)},avgPooling3d:function(t){return jn(t)},globalAveragePooling1d:function(t){return new $n(t)},globalAveragePooling2d:function(t){return new Rn(t)},globalMaxPooling1d:Vn,globalMaxPooling2d:qn,maxPooling1d:Jn,maxPooling2d:Hn,maxPooling3d:function(t){return new Tn(t)},gru:function(t){return new Mi(t)},gruCell:function(t){return new _i(t)},lstm:function(t){return new Wi(t)},lstmCell:function(t){return new Bi(t)},simpleRNN:function(t){return new Ri(t)},simpleRNNCell:function(t){return new Oi(t)},convLstm2d:function(t){return new Ji(t)},convLstm2dCell:function(t){return new qi(t)},rnn:function(t){return new $i(t)},stackedRNNCells:function(t){return new Ui(t)},bidirectional:function(t){return new Wn(t)},timeDistributed:function(t){return new Bn(t)},globalMaxPool1d:Vn,globalMaxPool2d:qn,maxPool1d:Jn,maxPool2d:Hn,Layer:Xt,RNN:$i,RNNCell:Fi,input:Os,gaussianNoise:function(t){return new dn(t)},gaussianDropout:function(t){return new gn(t)},alphaDropout:function(t){return new fn(t)},masking:function(t){return new en(t)}};var Kn={__proto__:null,binaryAccuracy:function(t,e){return je(t,e)},binaryCrossentropy:function(t,e){return Ze(t,e)},sparseCategoricalAccuracy:function(t,e){return Ke(t,e)},categoricalAccuracy:function(t,e){return Ve(t,e)},categoricalCrossentropy:function(t,e){return Ge(t,e)},precision:function(t,e){return Je(t,e)},recall:function(t,e){return He(t,e)},cosineProximity:function(t,e){return We(t,e)},meanAbsoluteError:function(t,e){return Oe(t,e)},meanAbsolutePercentageError:function(t,e){return Re(t,e)},MAPE:function(t,e){return Re(t,e)},mape:function(t,e){return Re(t,e)},meanSquaredError:function(t,e){return Fe(t,e)},MSE:function(t,e){return Fe(t,e)},mse:function(t,e){return 
Fe(t,e)}},Gn={__proto__:null,modelFromJSON:async function(t,s){"modelTopology"in t||(t={modelTopology:t});let i=t.modelTopology;null!=i.model_config&&(i=i.model_config);const n=De(us(i),s);if(null!=t.weightsManifest){const s=await e.io.loadWeights(t.weightsManifest,t.pathPrefix,n.weights.map((t=>t.originalName))),i={};for(const t of n.weights)i[t.originalName]=s[t.originalName];n.loadWeights(i),e.dispose(s)}return n}};var Yn={__proto__:null,l1l2:function(t){return new si(t)},l1:function(t){return ti(e=t),new si({l1:null!=e?e.l1:null,l2:0});var e},l2:function(t){return ti(e=t),new si({l2:null!=e?e.l2:null,l1:0});var e}};class Xn extends Ne{constructor(){super(...arguments),this.model=null}setModel(t){if(!(t instanceof Es))throw new Error("model must be a LayersModel, not some other Container");this.model=t}}function Qn(t,e){return t<e}function tr(t,e){return t>e}class er extends Xn{constructor(t){if(super(),null==t&&(t={}),t.restoreBestWeights)throw new o("restoreBestWeights = true is not implemented in EarlyStopping yet.");this.monitor=t.monitor||"val_loss",this.minDelta=Math.abs(t.minDelta||0),this.patience=t.patience||0,this.verbose=t.verbose||0,this.mode=t.mode||"auto",this.baseline=t.baseline,-1===["auto","min","max"].indexOf(this.mode)&&(console.warn(`EarlyStopping mode '${this.mode}' is invalid. Falling back to mode 'auto'.`),this.mode="auto"),"min"===this.mode?this.monitorFunc=Qn:"max"===this.mode||-1!==this.monitor.indexOf("acc")?this.monitorFunc=tr:this.monitorFunc=Qn,this.monitorFunc===Qn&&(this.minDelta*=-1)}async onTrainBegin(t){this.wait=0,this.stoppedEpoch=0,null!=this.baseline?this.best=this.baseline:this.best=this.monitorFunc===Qn?1/0:-1/0}async onEpochEnd(t,e){await Se(e);const s=this.getMonitorValue(e);null!=s&&(this.monitorFunc(s-this.minDelta,this.best)?(this.best=s,this.wait=0):(this.wait++,this.wait>=this.patience&&(this.stoppedEpoch=t,this.model.stopTraining=!0)))}async onTrainEnd(t){this.stoppedEpoch>0&&this.verbose&&console.log(`Epoch ${this.stoppedEpoch}: early stopping.`)}getMonitorValue(t){null==t&&(t={});const e=t[this.monitor];return null==e&&console.warn(`Metric for EarlyStopping ${this.monitor} is not available. Available metrics are: ${Object.keys(t)}`),e}}const sr={earlyStopping:function(t){return new er(t)}};t.Callback=Xn,t.CallbackList=Ce,t.CustomCallback=Ie,t.EarlyStopping=er,t.History=xe,t.InputSpec=Ht,t.LayerVariable=Vt,t.LayersModel=Es,t.RNN=$i,t.Sequential=Fs,t.SymbolicTensor=Zt,t.callbacks=sr,t.constraints=we,t.initializers=ke,t.input=Os,t.layers=Zn,t.loadLayersModel=function(t,e){return null==e&&(e={}),$s(t,e)},t.metrics=Kn,t.model=function(t){return new Es(t)},t.models=Gn,t.registerCallbackConstructor=function(t,e){Te.registerCallbackConstructor(t,e)},t.regularizers=Yn,t.sequential=function(t){return new Fs(t)},t.version_layers=cs,Object.defineProperty(t,"__esModule",{value:!0})}));
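/**
 * Usage sketch (a hedged illustration added by the editor; not part of the
 * published bundle). This UMD build attaches the Layers API to the global
 * `tf` object created by @tensorflow/tfjs-core, so once both scripts are
 * loaded, the factories exported above (tf.sequential, tf.layers.*,
 * tf.callbacks.*) can be used roughly as follows. `xs` and `ys` are
 * placeholders for the caller's training tensors:
 *
 *   const model = tf.sequential();
 *   model.add(tf.layers.embedding({inputDim: 1000, outputDim: 16, inputLength: 10}));
 *   model.add(tf.layers.globalAveragePooling1d({}));
 *   model.add(tf.layers.dense({units: 1, activation: 'sigmoid'}));
 *   model.compile({optimizer: 'adam', loss: 'binaryCrossentropy', metrics: ['accuracy']});
 *   await model.fit(xs, ys, {epochs: 50, validationSplit: 0.2,
 *     callbacks: tf.callbacks.earlyStopping({monitor: 'val_loss', patience: 3})});
 */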
//# sourceMappingURL=tf-layers.es2017.min.js.map