1"use strict";
2/**
3 * @license
4 * Copyright (c) 2016 The Polymer Project Authors. All rights reserved.
5 * This code may only be used under the BSD style license found at
6 * http://polymer.github.io/LICENSE.txt
7 * The complete set of authors may be found at
8 * http://polymer.github.io/AUTHORS.txt
9 * The complete set of contributors may be found at
10 * http://polymer.github.io/CONTRIBUTORS.txt
11 * Code distributed by Google as part of the polymer project is also
12 * subject to an additional IP rights grant found at
13 * http://polymer.github.io/PATENTS.txt
14 */
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
const cancel_token_1 = require("cancel-token");
const path = require("path");
const analyzer_1 = require("../core/analyzer");
const css_custom_property_scanner_1 = require("../css/css-custom-property-scanner");
const css_parser_1 = require("../css/css-parser");
const html_element_reference_scanner_1 = require("../html/html-element-reference-scanner");
const html_import_scanner_1 = require("../html/html-import-scanner");
const html_parser_1 = require("../html/html-parser");
const html_script_scanner_1 = require("../html/html-script-scanner");
const html_style_scanner_1 = require("../html/html-style-scanner");
const class_scanner_1 = require("../javascript/class-scanner");
const function_scanner_1 = require("../javascript/function-scanner");
const html_template_literal_scanner_1 = require("../javascript/html-template-literal-scanner");
const javascript_export_scanner_1 = require("../javascript/javascript-export-scanner");
const javascript_import_scanner_1 = require("../javascript/javascript-import-scanner");
const javascript_parser_1 = require("../javascript/javascript-parser");
const namespace_scanner_1 = require("../javascript/namespace-scanner");
const json_parser_1 = require("../json/json-parser");
const model_1 = require("../model/model");
const document_1 = require("../parser/document");
const behavior_scanner_1 = require("../polymer/behavior-scanner");
const css_import_scanner_1 = require("../polymer/css-import-scanner");
const dom_module_scanner_1 = require("../polymer/dom-module-scanner");
const polymer_core_feature_scanner_1 = require("../polymer/polymer-core-feature-scanner");
const polymer_element_scanner_1 = require("../polymer/polymer-element-scanner");
const pseudo_element_scanner_1 = require("../polymer/pseudo-element-scanner");
const scan_1 = require("../scanning/scan");
const package_url_resolver_1 = require("../url-loader/package-url-resolver");
const analysis_cache_1 = require("./analysis-cache");
exports.analyzerVersion = require('../../package.json').version;
/**
 * An analysis of a set of files at a specific point in time with respect to
 * updates to those files. New files can be added to an existing context, but
 * updates to files will cause a fork of the context with new analysis results.
 *
 * All file contents and analysis results are consistent within a single
 * analysis context. A context is forked via either the filesChanged or
 * clearCaches methods.
 *
 * For almost all purposes this is an entirely internal implementation detail.
 * An Analyzer instance has a reference to its current context, so it will
 * appear to be stateful with respect to file updates.
 */
class AnalysisContext {
    constructor(options, cache, generation) {
        this.parsers = new Map([
            ['html', new html_parser_1.HtmlParser()],
            ['js', new javascript_parser_1.JavaScriptParser()],
            ['mjs', new javascript_parser_1.JavaScriptParser()],
            ['css', new css_parser_1.CssParser()],
            ['json', new json_parser_1.JsonParser()],
        ]);
        this._languageAnalyzers = new Map([
            // TODO(rictic): add typescript language analyzer back after investigating
            // https://github.com/Polymer/polymer-analyzer/issues/623
        ]);
        this.loader = options.urlLoader;
        this.resolver = options.urlResolver || new package_url_resolver_1.PackageUrlResolver();
        this.parsers = options.parsers || this.parsers;
        this._lazyEdges = options.lazyEdges;
        this._scanners =
            options.scanners || AnalysisContext.getDefaultScanners(options);
        this._cache = cache || new analysis_cache_1.AnalysisCache();
        this._generation = generation || 0;
        this._analysisComplete = Promise.resolve();
    }
    static getDefaultScanners(options) {
        return new Map([
            [
                'html',
                [
                    new html_import_scanner_1.HtmlImportScanner(options.lazyEdges),
                    new html_script_scanner_1.HtmlScriptScanner(),
                    new html_style_scanner_1.HtmlStyleScanner(),
                    new dom_module_scanner_1.DomModuleScanner(),
                    new css_import_scanner_1.CssImportScanner(),
                    new html_element_reference_scanner_1.HtmlCustomElementReferenceScanner(),
                    new pseudo_element_scanner_1.PseudoElementScanner(),
                ]
            ],
            [
                'js',
                [
                    new polymer_element_scanner_1.PolymerElementScanner(),
                    new polymer_core_feature_scanner_1.PolymerCoreFeatureScanner(),
                    new behavior_scanner_1.BehaviorScanner(),
                    new namespace_scanner_1.NamespaceScanner(),
                    new function_scanner_1.FunctionScanner(),
                    new class_scanner_1.ClassScanner(),
                    new javascript_import_scanner_1.JavaScriptImportScanner({ moduleResolution: options.moduleResolution }),
                    new javascript_export_scanner_1.JavaScriptExportScanner(),
                    new html_template_literal_scanner_1.InlineHtmlDocumentScanner(),
                ]
            ],
            ['css', [new css_custom_property_scanner_1.CssCustomPropertyScanner()]]
        ]);
    }
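    // Illustrative sketch, not shipped behavior: getDefaultScanners is the fallback
    // the constructor uses when `options.scanners` is omitted, so a caller that only
    // wants to tweak one file type can start from these defaults. The `urlLoader`
    // below is an assumed, preconfigured UrlLoader instance.
    //
    //   const scanners = AnalysisContext.getDefaultScanners({ moduleResolution: 'node' });
    //   scanners.set('css', []); // e.g. skip CSS scanning entirely
    //   const context = new AnalysisContext({ urlLoader, scanners });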
    /**
     * Returns a copy of this analysis context with proper cache invalidation
     * for the given files.
     */
    filesChanged(urls) {
        const newCache = this._cache.invalidate(this.resolveUserInputUrls(urls));
        return this._fork(newCache);
    }
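    // Usage sketch (illustrative; `context` is an assumed existing AnalysisContext and
    // 'src/app.html' an assumed URL the configured resolver can handle). The receiver
    // is left untouched; only the returned fork sees the invalidation.
    //
    //   const forked = context.filesChanged(['src/app.html']);
    //   // `forked` will re-load and re-scan 'src/app.html' on its next analysis,
    //   // while `context` keeps serving its previously cached results.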
    /**
     * Implements Analyzer#analyze, see its docs.
     */
    analyze(urls, cancelToken) {
        return __awaiter(this, void 0, void 0, function* () {
            const resolvedUrls = this.resolveUserInputUrls(urls);
            // 1. Await current analysis if there is one, so we can check to see if it
            // has all of the requested URLs.
            yield this._analysisComplete;
            // 2. Check to see if we have all of the requested documents
            const hasAllDocuments = resolvedUrls.every((url) => this._cache.analyzedDocuments.get(url) != null);
            if (hasAllDocuments) {
                // all requested URLs are present, return the existing context
                return this;
            }
            // 3. Some URLs are new, so fork, but don't invalidate anything
            const newCache = this._cache.invalidate([]);
            const newContext = this._fork(newCache);
            return newContext._analyze(resolvedUrls, cancelToken);
        });
    }
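    // Usage sketch (illustrative; `context` and the URL are assumptions). analyze()
    // resolves to the context that actually holds the results, which may be a fork of
    // the receiver when previously unseen URLs are requested.
    //
    //   const resultContext = await context.analyze(['index.html']);
    //   const [resolved] = resultContext.resolveUserInputUrls(['index.html']);
    //   const documentOrWarning = resultContext.getDocument(resolved);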
    /**
     * Internal analysis method called when we know we need to fork.
     */
    _analyze(resolvedUrls, cancelToken) {
        return __awaiter(this, void 0, void 0, function* () {
            const analysisComplete = (() => __awaiter(this, void 0, void 0, function* () {
                // 1. Load and scan all root documents
                const maybeScannedDocuments = yield Promise.all(resolvedUrls.map((url) => __awaiter(this, void 0, void 0, function* () {
                    try {
                        const scannedResult = yield this.scan(url, cancelToken);
                        if (scannedResult.successful === true) {
                            this._cache.failedDocuments.delete(url);
                            return scannedResult.value;
                        }
                        else {
                            this._cache.failedDocuments.set(url, scannedResult.error);
                            return undefined;
                        }
                    }
                    catch (e) {
                        if (cancel_token_1.isCancel(e)) {
                            return;
                        }
                        // This is a truly unexpected error. We should fail.
                        throw e;
                    }
                })));
                const scannedDocuments = maybeScannedDocuments.filter((d) => d !== undefined);
                // 2. Run per-document resolution
                const documents = scannedDocuments.map((d) => this.getDocument(d.url));
                // TODO(justinfagnani): instead of the above steps, do:
                // 1. Load and run prescanners
                // 2. Run global analyzers (_languageAnalyzers now, but it doesn't need to
                //    be separated by file type)
                // 3. Run per-document scanners and resolvers
                return documents;
            }))();
            this._analysisComplete = analysisComplete.then((_) => { });
            yield this._analysisComplete;
            return this;
        });
    }
    /**
     * Gets an analyzed Document from the document cache. This is only useful for
     * Analyzer plugins. You almost certainly want to use `analyze()` instead.
     *
     * If a document has been analyzed, the analyzed Document is returned. If not,
     * the scanned document cache is used and a new analyzed Document is returned.
     * If a file is in neither cache, a Warning describing the situation is
     * returned instead.
     */
    getDocument(resolvedUrl) {
        const cachedWarning = this._cache.failedDocuments.get(resolvedUrl);
        if (cachedWarning) {
            return cachedWarning;
        }
        const cachedResult = this._cache.analyzedDocuments.get(resolvedUrl);
        if (cachedResult) {
            return cachedResult;
        }
        const scannedDocument = this._cache.scannedDocuments.get(resolvedUrl);
        if (!scannedDocument) {
            return makeRequestedWithoutLoadingWarning(resolvedUrl);
        }
        const extension = path.extname(resolvedUrl).substring(1);
        const languageAnalyzer = this._languageAnalyzers.get(extension);
        let analysisResult;
        if (languageAnalyzer) {
            analysisResult = languageAnalyzer.analyze(scannedDocument.url);
        }
        const document = new model_1.Document(scannedDocument, this, analysisResult);
        this._cache.analyzedDocuments.set(resolvedUrl, document);
        this._cache.analyzedDocumentPromises.getOrCompute(resolvedUrl, () => __awaiter(this, void 0, void 0, function* () { return document; }));
        document.resolve();
        return document;
    }
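    // Usage sketch (illustrative; names are assumptions). getDocument() is synchronous,
    // so it should only be called after analyze()/scan() has completed for the URL;
    // the result is either a resolved Document or a Warning.
    //
    //   const analyzed = await context.analyze(['index.html']);
    //   const [resolved] = analyzed.resolveUserInputUrls(['index.html']);
    //   const result = analyzed.getDocument(resolved);
    //   if (result instanceof model_1.Document) {
    //       // use the analyzed Document
    //   } else {
    //       // `result` is a Warning describing why no Document is available
    //   }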
    /**
     * This is only useful for Analyzer plugins.
     *
     * If a url has been scanned, returns the ScannedDocument.
     */
    _getScannedDocument(resolvedUrl) {
        return this._cache.scannedDocuments.get(resolvedUrl);
    }
    /**
     * Clear all cached information from this analyzer instance.
     *
     * Note: if at all possible, instead tell the analyzer about the specific
     * files that changed rather than clearing caches like this. Caching provides
     * large performance gains.
     */
    clearCaches() {
        return this._fork(new analysis_cache_1.AnalysisCache());
    }
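    // Usage sketch (illustrative; `context` is an assumption). clearCaches() forks with
    // a completely empty cache, so every file is re-loaded and re-scanned on the next
    // analysis; prefer filesChanged() when the set of edited files is known.
    //
    //   const cold = context.clearCaches();
    //   await cold.analyze(['index.html']); // pays the full load/scan cost again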
    /**
     * Returns a copy of the context but with optional replacements of cache or
     * constructor options.
     *
     * Note: this feature is experimental.
     */
    _fork(cache, options) {
        const contextOptions = {
            lazyEdges: this._lazyEdges,
            parsers: this.parsers,
            scanners: this._scanners,
            urlLoader: this.loader,
            urlResolver: this.resolver,
        };
        if (options && options.urlLoader) {
            contextOptions.urlLoader = options.urlLoader;
        }
        if (!cache) {
            cache = this._cache.invalidate([]);
        }
        const copy = new AnalysisContext(contextOptions, cache, this._generation + 1);
        return copy;
    }
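    // Usage sketch (illustrative; `overlayLoader` is an assumed UrlLoader, e.g. one that
    // serves in-memory editor buffers and falls back to disk). _fork lets internal
    // callers swap the loader while reusing the existing cache (or, when no cache is
    // passed, a fresh invalidation of it).
    //
    //   const editorContext = context._fork(undefined, { urlLoader: overlayLoader });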
    /**
     * Scans a file locally, that is, for features that do not depend on this
     * file's imports. Local features can be cached even when imports are
     * invalidated. This method does not trigger transitive scanning; scan()
     * does that.
     *
     * TODO(justinfagnani): consider renaming this to something like
     * _preScan, since about the only useful things it can find are
     * imports, exports and other syntactic structures.
     */
    _scanLocal(resolvedUrl, cancelToken) {
        return __awaiter(this, void 0, void 0, function* () {
            return this._cache.scannedDocumentPromises.getOrCompute(resolvedUrl, () => __awaiter(this, void 0, void 0, function* () {
                const parsedDocResult = yield this._parse(resolvedUrl, cancelToken);
                if (parsedDocResult.successful === false) {
                    this._cache.dependencyGraph.rejectDocument(resolvedUrl, new model_1.WarningCarryingException(parsedDocResult.error));
                    return parsedDocResult;
                }
                const parsedDoc = parsedDocResult.value;
                try {
                    const scannedDocument = yield this._scanDocument(parsedDoc);
                    const imports = scannedDocument.getNestedFeatures().filter((e) => e instanceof model_1.ScannedImport);
                    // Update dependency graph
                    const importUrls = filterOutUndefineds(imports.map((i) => i.url === undefined ?
                        undefined :
                        this.resolver.resolve(parsedDoc.baseUrl, i.url, i)));
                    this._cache.dependencyGraph.addDocument(resolvedUrl, importUrls);
                    return { successful: true, value: scannedDocument };
                }
                catch (e) {
                    const message = (e && e.message) || `Unknown error during scan.`;
                    const warning = new model_1.Warning({
                        code: 'could-not-scan',
                        message,
                        parsedDocument: parsedDoc,
                        severity: model_1.Severity.ERROR,
                        sourceRange: {
                            file: resolvedUrl,
                            start: { column: 0, line: 0 },
                            end: { column: 0, line: 0 }
                        }
                    });
                    this._cache.dependencyGraph.rejectDocument(resolvedUrl, new model_1.WarningCarryingException(warning));
                    return { successful: false, error: warning };
                }
            }), cancelToken);
        });
    }
    /**
     * Scans a top-level document and all of its transitive dependencies.
     */
    scan(resolvedUrl, cancelToken) {
        return __awaiter(this, void 0, void 0, function* () {
            return this._cache.dependenciesScannedPromises.getOrCompute(resolvedUrl, () => __awaiter(this, void 0, void 0, function* () {
                const scannedDocumentResult = yield this._scanLocal(resolvedUrl, cancelToken);
                if (scannedDocumentResult.successful === false) {
                    return scannedDocumentResult;
                }
                const scannedDocument = scannedDocumentResult.value;
                const imports = scannedDocument.getNestedFeatures().filter((e) => e instanceof model_1.ScannedImport);
                // Scan imports
                for (const scannedImport of imports) {
                    if (scannedImport.url === undefined) {
                        continue;
                    }
                    const importUrl = this.resolver.resolve(scannedDocument.document.baseUrl, scannedImport.url, scannedImport);
                    if (importUrl === undefined) {
                        continue;
                    }
                    // Request a scan of `importUrl` but do not wait for the results to
                    // avoid deadlock in the case of cycles. Later we use the
                    // DependencyGraph to wait for all transitive dependencies to load.
                    this.scan(importUrl, cancelToken)
                        .then((result) => {
                            if (result.successful === true) {
                                return;
                            }
                            scannedImport.error = result.error;
                        })
                        .catch((e) => {
                            if (cancel_token_1.isCancel(e)) {
                                return;
                            }
                            throw e;
                        });
                }
                yield this._cache.dependencyGraph.whenReady(resolvedUrl);
                return scannedDocumentResult;
            }), cancelToken);
        });
    }
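    // Cycle sketch (illustrative; the file names are assumptions). Because the recursive
    // scan() calls above are requested but not awaited inline, mutually importing
    // documents do not deadlock; whenReady() on the dependency graph is what ultimately
    // waits for the whole import cluster to settle.
    //
    //   // a.html: <link rel="import" href="b.html">
    //   // b.html: <link rel="import" href="a.html">
    //   const [resolved] = context.resolveUserInputUrls(['a.html']);
    //   await context.scan(resolved); // resolves once both documents are scanned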
    /**
     * Scans a ParsedDocument.
     */
    _scanDocument(document, maybeAttachedComment, maybeContainingDocument) {
        return __awaiter(this, void 0, void 0, function* () {
            const { features: scannedFeatures, warnings } = yield this._getScannedFeatures(document);
            // If there's an HTML comment that applies to this document then we assume
            // that it applies to the first feature.
            const firstScannedFeature = scannedFeatures[0];
            if (firstScannedFeature && firstScannedFeature instanceof model_1.ScannedElement) {
                firstScannedFeature.applyHtmlComment(maybeAttachedComment, maybeContainingDocument);
            }
            const scannedDocument = new model_1.ScannedDocument(document, scannedFeatures, warnings);
            if (!scannedDocument.isInline) {
                if (this._cache.scannedDocuments.has(scannedDocument.url)) {
                    throw new Error('Scanned document already in cache. This should never happen.');
                }
                this._cache.scannedDocuments.set(scannedDocument.url, scannedDocument);
            }
            yield this._scanInlineDocuments(scannedDocument);
            return scannedDocument;
        });
    }
    _getScannedFeatures(document) {
        return __awaiter(this, void 0, void 0, function* () {
            const scanners = this._scanners.get(document.type);
            if (scanners) {
                try {
                    return yield scan_1.scan(document, scanners);
                }
                catch (e) {
                    if (e instanceof model_1.WarningCarryingException) {
                        throw e;
                    }
                    const message = e == null ? `Unknown error while scanning.` :
                        `Error while scanning: ${String(e)}`;
                    throw new model_1.WarningCarryingException(new model_1.Warning({
                        code: 'internal-scanning-error',
                        message,
                        parsedDocument: document,
                        severity: model_1.Severity.ERROR,
                        sourceRange: {
                            file: document.url,
                            start: { column: 0, line: 0 },
                            end: { column: 0, line: 0 },
                        }
                    }));
                }
            }
            return { features: [], warnings: [] };
        });
    }
    _scanInlineDocuments(containingDocument) {
        return __awaiter(this, void 0, void 0, function* () {
            for (const feature of containingDocument.features) {
                if (!(feature instanceof model_1.ScannedInlineDocument)) {
                    continue;
                }
                const locationOffset = {
                    line: feature.locationOffset.line,
                    col: feature.locationOffset.col,
                    filename: containingDocument.url
                };
                try {
                    const parsedDoc = this._parseContents(feature.type, feature.contents, containingDocument.url, {
                        locationOffset,
                        astNode: feature.astNode,
                        baseUrl: containingDocument.document.baseUrl
                    });
                    const scannedDoc = yield this._scanDocument(parsedDoc, feature.attachedComment, containingDocument.document);
                    feature.scannedDocument = scannedDoc;
                }
                catch (err) {
                    if (err instanceof model_1.WarningCarryingException) {
                        containingDocument.warnings.push(err.warning);
                        continue;
                    }
                    throw err;
                }
            }
        });
    }
    /**
     * Returns `true` if the provided resolved URL can be loaded. Obeys the
     * semantics defined by `UrlLoader` and should only be used to check
     * resolved URLs.
     */
    canLoad(resolvedUrl) {
        return this.loader.canLoad(resolvedUrl);
    }
    /**
     * Loads the content at the provided resolved URL. Obeys the semantics
     * defined by `UrlLoader` and should only be used to attempt to load resolved
     * URLs.
     *
     * Currently does no caching; each call delegates directly to the configured
     * UrlLoader.
     */
    load(resolvedUrl) {
        return __awaiter(this, void 0, void 0, function* () {
            if (!this.canLoad(resolvedUrl)) {
                return {
                    successful: false,
                    error: `Configured URL Loader can not load URL ${resolvedUrl}`
                };
            }
            try {
                const value = yield this.loader.load(resolvedUrl);
                return { successful: true, value };
            }
            catch (e) {
                const message = (e && e.message) || `Unknown failure while loading.`;
                return { successful: false, error: message };
            }
        });
    }
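    // Usage sketch (illustrative; `resolvedUrl` is an assumed already-resolved URL).
    // load() does not throw for ordinary failures; it returns a discriminated result,
    // so callers branch on `successful`.
    //
    //   const result = await context.load(resolvedUrl);
    //   if (result.successful) {
    //       console.log(`loaded ${result.value.length} characters`);
    //   } else {
    //       console.warn(result.error);
    //   }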
    /**
     * Caching + loading wrapper around _parseContents.
     */
    _parse(resolvedUrl, cancelToken) {
        return __awaiter(this, void 0, void 0, function* () {
            return this._cache.parsedDocumentPromises.getOrCompute(resolvedUrl, () => __awaiter(this, void 0, void 0, function* () {
                const result = yield this.load(resolvedUrl);
                if (!result.successful) {
                    return {
                        successful: false,
                        error: new model_1.Warning({
                            code: 'could-not-load',
                            parsedDocument: new document_1.UnparsableParsedDocument(resolvedUrl, ''),
                            severity: model_1.Severity.ERROR,
                            sourceRange: {
                                file: resolvedUrl,
                                start: { column: 0, line: 0 },
                                end: { column: 0, line: 0 }
                            },
                            message: result.error
                        })
                    };
                }
                const extension = path.extname(resolvedUrl).substring(1);
                try {
                    const parsedDoc = this._parseContents(extension, result.value, resolvedUrl);
                    return { successful: true, value: parsedDoc };
                }
                catch (e) {
                    if (e instanceof model_1.WarningCarryingException) {
                        return { successful: false, error: e.warning };
                    }
                    const message = (e && e.message) || `Unknown error while parsing.`;
                    return {
                        successful: false,
                        error: new model_1.Warning({
                            code: 'could-not-parse',
                            parsedDocument: new document_1.UnparsableParsedDocument(resolvedUrl, result.value),
                            severity: model_1.Severity.ERROR,
                            sourceRange: {
                                file: resolvedUrl,
                                start: { column: 0, line: 0 },
                                end: { column: 0, line: 0 }
                            },
                            message
                        })
                    };
                }
            }), cancelToken);
        });
    }
    /**
     * Parses the given string into the Abstract Syntax Tree (AST) corresponding
     * to its type.
     */
    _parseContents(type, contents, url, inlineInfo) {
        const parser = this.parsers.get(type);
        if (parser == null) {
            throw new analyzer_1.NoKnownParserError(`No parser for file type ${type}`);
        }
        try {
            return parser.parse(contents, url, this.resolver, inlineInfo);
        }
        catch (error) {
            if (error instanceof model_1.WarningCarryingException) {
                throw error;
            }
            const parsedDocument = new document_1.UnparsableParsedDocument(url, contents);
            const message = error == null ? `Unable to parse as ${type}` :
                `Unable to parse as ${type}: ${error}`;
            throw new model_1.WarningCarryingException(new model_1.Warning({
                parsedDocument,
                code: 'parse-error',
                message,
                severity: model_1.Severity.ERROR,
                sourceRange: { file: url, start: { line: 0, column: 0 }, end: { line: 0, column: 0 } }
            }));
        }
    }
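    // Usage sketch (illustrative; `containingUrl`, `containingBaseUrl`, and
    // `scriptAstNode` are assumed values taken from a containing HTML document).
    // Passing `inlineInfo` is what lets source ranges of an inline <script> be
    // reported relative to the containing file.
    //
    //   const parsed = context._parseContents('js', 'const x = 1;', containingUrl, {
    //       locationOffset: { line: 10, col: 2, filename: containingUrl },
    //       astNode: scriptAstNode,
    //       baseUrl: containingBaseUrl,
    //   });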
    /**
     * Resolves all resolvable URLs in the list and removes unresolvable ones.
     */
    resolveUserInputUrls(urls) {
        return filterOutUndefineds(urls.map((u) => this.resolver.resolve(u)));
    }
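    // Usage sketch (illustrative; assumes the configured resolver can handle the first
    // entry but not the second). Unresolvable inputs are silently dropped rather than
    // producing warnings.
    //
    //   const resolved = context.resolveUserInputUrls(['src/app.html', 'mailto:nobody@example.com']);
    //   // -> only the resolved form of 'src/app.html'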
}
exports.AnalysisContext = AnalysisContext;
function filterOutUndefineds(arr) {
    return arr.filter((t) => t !== undefined);
}
/**
 * A warning for a weird situation that should never happen.
 *
 * Before calling getDocument(), which is synchronous, a caller must first
 * have finished loading and scanning, as those phases are asynchronous. If
 * that precondition is violated we need to construct a warning, but we don't
 * have a parsed document, so we construct this fake one. This is such a rare
 * case that it's worth going out of our way here so that warnings can
 * uniformly expect to have documents.
 */
function makeRequestedWithoutLoadingWarning(resolvedUrl) {
    const parsedDocument = new document_1.UnparsableParsedDocument(resolvedUrl, '');
    return new model_1.Warning({
        sourceRange: {
            file: resolvedUrl,
            start: { line: 0, column: 0 },
            end: { line: 0, column: 0 }
        },
        code: 'unable-to-analyze',
        message: `[Internal Error] Document was requested ` +
            `before loading and scanning finished. This usually indicates an ` +
            `anomalous error during loading or analysis. Please file a bug at ` +
            `https://github.com/Polymer/polymer-analyzer/issues/new with info ` +
            `on the source code that caused this. ` +
            `Polymer Analyzer version: ${exports.analyzerVersion}`,
        severity: model_1.Severity.ERROR,
        parsedDocument
    });
}
//# sourceMappingURL=analysis-context.js.map