/*!
 * Copyright 2014 Google Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/// <reference types="node" />
import { ServiceObject, ResponseCallback, SetMetadataResponse } from '@google-cloud/common';
import { ResourceStream } from '@google-cloud/paginator';
import { BigQuery, Job, Dataset, Query, SimpleQueryRowsResponse, SimpleQueryRowsCallback, ResourceCallback, RequestCallback, PagedResponse, PagedCallback, JobRequest, PagedRequest } from '.';
import { Duplex, Writable } from 'stream';
import { JobMetadata } from './job';
import bigquery from './types';
import { IntegerTypeCastOptions } from './bigquery';
import { RowQueue } from './rowQueue';
export interface File {
    bucket: any;
    kmsKeyName?: string;
    userProject?: string;
    name: string;
    generation?: number;
}
export type JobMetadataCallback = RequestCallback<JobMetadata>;
export type JobMetadataResponse = [JobMetadata];
export type RowMetadata = any;
export type InsertRowsOptions = bigquery.ITableDataInsertAllRequest & {
    createInsertId?: boolean;
    partialRetries?: number;
    raw?: boolean;
    schema?: string | {};
};
export type InsertRowsResponse = [
    bigquery.ITableDataInsertAllResponse | bigquery.ITable
];
export type InsertRowsCallback = RequestCallback<bigquery.ITableDataInsertAllResponse | bigquery.ITable>;
export type RowsResponse = PagedResponse<RowMetadata, GetRowsOptions, bigquery.ITableDataList | bigquery.ITable>;
export type RowsCallback = PagedCallback<RowMetadata, GetRowsOptions, bigquery.ITableDataList | bigquery.ITable>;
export interface InsertRow {
    insertId?: string;
    json?: bigquery.IJsonObject;
}
export type TableRow = bigquery.ITableRow;
export type TableRowField = bigquery.ITableCell;
export type TableRowValue = string | TableRow;
export type GetRowsOptions = PagedRequest<bigquery.tabledata.IListParams> & {
    wrapIntegers?: boolean | IntegerTypeCastOptions;
};
export type JobLoadMetadata = JobRequest<bigquery.IJobConfigurationLoad> & {
    format?: string;
};
export type CreateExtractJobOptions = JobRequest<bigquery.IJobConfigurationExtract> & {
    format?: 'CSV' | 'JSON' | 'AVRO' | 'PARQUET' | 'ORC';
    gzip?: boolean;
};
export type JobResponse = [Job, bigquery.IJob];
export type JobCallback = ResourceCallback<Job, bigquery.IJob>;
export type CreateCopyJobMetadata = CopyTableMetadata;
export type SetTableMetadataOptions = TableMetadata;
export type CopyTableMetadata = JobRequest<bigquery.IJobConfigurationTableCopy>;
export type TableMetadata = bigquery.ITable & {
    name?: string;
    schema?: string | TableField[] | TableSchema;
    partitioning?: string;
    view?: string | ViewDefinition;
};
export type ViewDefinition = bigquery.IViewDefinition;
export type FormattedMetadata = bigquery.ITable;
export type TableSchema = bigquery.ITableSchema;
export type TableField = bigquery.ITableFieldSchema;
export interface PartialInsertFailure {
    message: string;
    reason: string;
    row: RowMetadata;
}
export type Policy = bigquery.IPolicy;
export type GetPolicyOptions = bigquery.IGetPolicyOptions;
export type SetPolicyOptions = Omit<bigquery.ISetIamPolicyRequest, 'policy'>;
export type PolicyRequest = bigquery.IGetIamPolicyRequest;
export type PolicyResponse = [Policy];
export type PolicyCallback = RequestCallback<PolicyResponse>;
export type PermissionsResponse = [bigquery.ITestIamPermissionsResponse];
export type PermissionsCallback = RequestCallback<PermissionsResponse>;
export interface InsertStreamOptions {
    insertRowsOptions?: InsertRowsOptions;
    batchOptions?: RowBatchOptions;
}
export interface RowBatchOptions {
    maxBytes: number;
    maxRows: number;
    maxMilliseconds: number;
}
export interface TableOptions {
    location?: string;
}
/**
 * Table objects are returned by methods such as
 * {@link Dataset#table}, {@link Dataset#createTable}, and
 * {@link Dataset#getTables}.
 *
 * @class
 * @param {Dataset} dataset {@link Dataset} instance.
 * @param {string} id The ID of the table.
 * @param {object} [options] Table options.
 * @param {string} [options.location] The geographic location of the table; by
 *      default this value is inherited from the dataset. This can be used to
 *      configure the location of all jobs created through a table instance. It
 *      cannot be used to set the actual location of the table. This value will
 *      be superseded by any API responses containing location data for the
 *      table.
 *
 * @example
 * ```
 * const {BigQuery} = require('@google-cloud/bigquery');
 * const bigquery = new BigQuery();
 * const dataset = bigquery.dataset('my-dataset');
 *
 * const table = dataset.table('my-table');
 * ```
 */
declare class Table extends ServiceObject {
    dataset: Dataset;
    bigQuery: BigQuery;
    location?: string;
    rowQueue?: RowQueue;
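    /**
     * Create a readable object stream that emits each row of the table; it
     * streams the same data returned by {@link Table#getRows}.
     *
     * A minimal consumption sketch (dataset and table names are placeholders):
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * table.createReadStream()
     *   .on('error', console.error)
     *   .on('data', (row) => {
     *     // `row` is one row of table data.
     *   })
     *   .on('end', () => {
     *     // All rows have been retrieved.
     *   });
     * ```
     */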
    createReadStream(options?: GetRowsOptions): ResourceStream<RowMetadata>;
    constructor(dataset: Dataset, id: string, options?: TableOptions);
    /**
     * Convert a comma-separated name:type string to a table schema object.
     *
     * @static
     * @private
     *
     * @param {string} str Comma-separated schema string.
     * @returns {object} Table schema in the format the API expects.
     */
    static createSchemaFromString_(str: string): TableSchema;
    /**
     * Convert a row entry from native types to their encoded types that the API
     * expects.
     *
     * @static
     * @private
     *
     * @param {*} value The value to be converted.
     * @returns {*} The converted value.
     */
    static encodeValue_(value?: {} | null): {} | null;
    /**
     * @private
     */
    static formatMetadata_(options: TableMetadata): FormattedMetadata;
    /**
     * @callback JobMetadataCallback
     * @param {?Error} err Request error, if any.
     * @param {object} apiResponse The full API response.
     */
    /**
     * @typedef {array} JobMetadataResponse
     * @property {object} 0 The full API response.
     */
    /**
     * Copy data from one table to another, optionally creating that table.
     *
     * @param {Table} destination The destination table.
     * @param {object} [metadata] Metadata to set with the copy operation. The
     *     metadata object should be in the format of a
     *     {@link https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy| `JobConfigurationTableCopy`}
     *     object.
     * @param {string} [metadata.jobId] Custom id for the underlying job.
     * @param {string} [metadata.jobPrefix] Prefix to apply to the underlying job
     *     id.
     * @param {JobMetadataCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<JobMetadataResponse>}
     *
     * @throws {Error} If a destination other than a Table object is provided.
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     *
     * const table = dataset.table('my-table');
     * const yourTable = dataset.table('your-table');
     *
     * table.copy(yourTable, (err, apiResponse) => {});
     *
     * //-
     * // See https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy
     * // for all available options.
     * //-
     * const metadata = {
     *   createDisposition: 'CREATE_NEVER',
     *   writeDisposition: 'WRITE_TRUNCATE'
     * };
     *
     * table.copy(yourTable, metadata, (err, apiResponse) => {});
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.copy(yourTable, metadata).then((data) => {
     *   const apiResponse = data[0];
     * });
     * ```
     */
    copy(destination: Table, metadata?: CopyTableMetadata): Promise<JobMetadataResponse>;
    copy(destination: Table, metadata: CopyTableMetadata, callback: JobMetadataCallback): void;
    copy(destination: Table, callback: JobMetadataCallback): void;
    /**
     * @callback JobMetadataCallback
     * @param {?Error} err Request error, if any.
     * @param {object} apiResponse The full API response.
     */
    /**
     * @typedef {array} JobMetadataResponse
     * @property {object} 0 The full API response.
     */
    /**
     * Copy data from multiple tables into this table.
     *
     * @param {Table|Table[]} sourceTables The
     *     source table(s) to copy data from.
     * @param {object} [metadata] Metadata to set with the copy operation. The
     *     metadata object should be in the format of a
     *     {@link https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy| `JobConfigurationTableCopy`}
     *     object.
     * @param {string} [metadata.jobId] Custom id for the underlying job.
     * @param {string} [metadata.jobPrefix] Prefix to apply to the underlying job
     *     id.
     * @param {JobMetadataCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<JobMetadataResponse>}
     *
     * @throws {Error} If a source other than a Table object is provided.
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * const sourceTables = [
     *   dataset.table('your-table'),
     *   dataset.table('your-second-table')
     * ];
     *
     * table.copyFrom(sourceTables, (err, apiResponse) => {});
     *
     * //-
     * // See https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy
     * // for all available options.
     * //-
     * const metadata = {
     *   createDisposition: 'CREATE_NEVER',
     *   writeDisposition: 'WRITE_TRUNCATE'
     * };
     *
     * table.copyFrom(sourceTables, metadata, (err, apiResponse) => {});
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.copyFrom(sourceTables, metadata).then((data) => {
     *   const apiResponse = data[0];
     * });
     * ```
     */
    copyFrom(sourceTables: Table | Table[], metadata?: CopyTableMetadata): Promise<JobMetadataResponse>;
    copyFrom(sourceTables: Table | Table[], metadata: CopyTableMetadata, callback: JobMetadataCallback): void;
    copyFrom(sourceTables: Table | Table[], callback: JobMetadataCallback): void;
    /**
     * Copy data from one table to another, optionally creating that table.
     *
     * See {@link https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert| Jobs: insert API Documentation}
     *
     * @param {Table} destination The destination table.
     * @param {object} [metadata] Metadata to set with the copy operation. The
     *     metadata object should be in the format of a
     *     {@link https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy| `JobConfigurationTableCopy`}
     *     object.
     * @param {string} [metadata.jobId] Custom job id.
     * @param {string} [metadata.jobPrefix] Prefix to apply to the job id.
     * @param {JobCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request
     * @param {Job} callback.job The job used to copy your table.
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<JobResponse>}
     *
     * @throws {Error} If a destination other than a Table object is provided.
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * const yourTable = dataset.table('your-table');
     * table.createCopyJob(yourTable, (err, job, apiResponse) => {
     *   // `job` is a Job object that can be used to check the status of the
     *   // request.
     * });
     *
     * //-
     * // See https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy
     * // for all available options.
     * //-
     * const metadata = {
     *   createDisposition: 'CREATE_NEVER',
     *   writeDisposition: 'WRITE_TRUNCATE'
     * };
     *
     * table.createCopyJob(yourTable, metadata, (err, job, apiResponse) => {});
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.createCopyJob(yourTable, metadata).then((data) => {
     *   const job = data[0];
     *   const apiResponse = data[1];
     * });
     * ```
     */
    createCopyJob(destination: Table, metadata?: CreateCopyJobMetadata): Promise<JobResponse>;
    createCopyJob(destination: Table, metadata: CreateCopyJobMetadata, callback: JobCallback): void;
    createCopyJob(destination: Table, callback: JobCallback): void;
    /**
     * Copy data from multiple tables into this table.
     *
     * See {@link https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert| Jobs: insert API Documentation}
     *
     * @param {Table|Table[]} sourceTables The
     *     source table(s) to copy data from.
     * @param {object} [metadata] Metadata to set with the copy operation. The
     *     metadata object should be in the format of a
     *     {@link https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy| `JobConfigurationTableCopy`}
     *     object.
     * @param {string} [metadata.jobId] Custom job id.
     * @param {string} [metadata.jobPrefix] Prefix to apply to the job id.
     * @param {JobCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request
     * @param {Job} callback.job The job used to copy your table.
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<JobResponse>}
     *
     * @throws {Error} If a source other than a Table object is provided.
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * const sourceTables = [
     *   dataset.table('your-table'),
     *   dataset.table('your-second-table')
     * ];
     *
     * const callback = (err, job, apiResponse) => {
     *   // `job` is a Job object that can be used to check the status of the
     *   // request.
     * };
     *
     * table.createCopyFromJob(sourceTables, callback);
     *
     * //-
     * // See https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy
     * // for all available options.
     * //-
     * const metadata = {
     *   createDisposition: 'CREATE_NEVER',
     *   writeDisposition: 'WRITE_TRUNCATE'
     * };
     *
     * table.createCopyFromJob(sourceTables, metadata, callback);
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.createCopyFromJob(sourceTables, metadata).then((data) => {
     *   const job = data[0];
     *   const apiResponse = data[1];
     * });
     * ```
     */
    createCopyFromJob(source: Table | Table[], metadata?: CopyTableMetadata): Promise<JobResponse>;
    createCopyFromJob(source: Table | Table[], metadata: CopyTableMetadata, callback: JobCallback): void;
    createCopyFromJob(source: Table | Table[], callback: JobCallback): void;
    /**
     * Export table to Cloud Storage.
     *
     * See {@link https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert| Jobs: insert API Documentation}
     *
     * @param {string|File} destination Where the file should be exported
     *     to. A string or a {@link
     * https://googleapis.dev/nodejs/storage/latest/File.html File}
     * object.
     * @param {object} [options] The configuration object.
     * @param {string} [options.format="CSV"] The format to export the data in.
     *     Allowed options are "AVRO", "CSV", "JSON", "ORC", or "PARQUET".
     * @param {boolean} [options.gzip] Specify if you would like the file compressed
     *     with GZIP. Default: false.
     * @param {string} [options.jobId] Custom job id.
     * @param {string} [options.jobPrefix] Prefix to apply to the job id.
     * @param {JobCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request
     * @param {Job} callback.job The job used to export the table.
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<JobResponse>}
     *
     * @throws {Error} If destination isn't a File object.
     * @throws {Error} If destination format isn't recognized.
     *
     * @example
     * ```
     * const {Storage} = require('@google-cloud/storage');
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * const storage = new Storage({
     *   projectId: 'grape-spaceship-123'
     * });
     * const extractedFile = storage.bucket('institutions').file('2014.csv');
     *
     * function callback(err, job, apiResponse) {
     *   // `job` is a Job object that can be used to check the status of the
     *   // request.
     * }
     *
     * //-
     * // To use the default options, just pass a
     * // {@link https://googleapis.dev/nodejs/storage/latest/File.html| File}
     * // object.
     * //
     * // Note: The exported format type will be inferred from the file's extension.
     * // If you wish to override this, or provide an array of destination files,
     * // you must provide an `options` object.
     * //-
     * table.createExtractJob(extractedFile, callback);
     *
     * //-
     * // If you need more customization, pass an `options` object.
     * //-
     * const options = {
     *   format: 'json',
     *   gzip: true
     * };
     *
     * table.createExtractJob(extractedFile, options, callback);
     *
     * //-
     * // You can also specify multiple destination files.
     * //-
     * table.createExtractJob([
     *   storage.bucket('institutions').file('2014.json'),
     *   storage.bucket('institutions-copy').file('2014.json')
     * ], options, callback);
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.createExtractJob(extractedFile, options).then((data) => {
     *   const job = data[0];
     *   const apiResponse = data[1];
     * });
     * ```
     */
    createExtractJob(destination: File, options?: CreateExtractJobOptions): Promise<JobResponse>;
    createExtractJob(destination: File, options: CreateExtractJobOptions, callback: JobCallback): void;
    createExtractJob(destination: File, callback: JobCallback): void;
    /**
     * Load data from a local file or Storage {@link
     * https://googleapis.dev/nodejs/storage/latest/File.html File}.
     *
     * By loading data this way, you create a load job that will run your data
     * load asynchronously. If you would like instantaneous access to your data,
     * insert it using {@link Table#insert}.
     *
     * Note: The file type will be inferred from the given file's extension. If you
     * wish to override this, you must provide `metadata.format`.
     *
     * See {@link https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert| Jobs: insert API Documentation}
     *
     * @param {string|File|File[]} source The source file to load. A string (path)
     * to a local file, or one or more {@link
     * https://googleapis.dev/nodejs/storage/latest/File.html File}
     * objects.
     * @param {object} [metadata] Metadata to set with the load operation. The
     *     metadata object should be in the format of the
     *     {@link https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad| `configuration.load`}
     * property of a Jobs resource.
     * @param {string} [metadata.format] The format the data being loaded is in.
     *     Allowed options are "AVRO", "CSV", "JSON", "ORC", or "PARQUET".
     * @param {string} [metadata.jobId] Custom job id.
     * @param {string} [metadata.jobPrefix] Prefix to apply to the job id.
     * @param {JobCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request
     * @param {Job} callback.job The job used to load your data.
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<JobResponse>}
     *
     * @throws {Error} If the source isn't a string file name or a File instance.
     *
     * @example
     * ```
     * const {Storage} = require('@google-cloud/storage');
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * //-
     * // Load data from a local file.
     * //-
     * const callback = (err, job, apiResponse) => {
     *   // `job` is a Job object that can be used to check the status of the
     *   // request.
     * };
     *
     * table.createLoadJob('./institutions.csv', callback);
     *
     * //-
     * // You may also pass in metadata in the format of a Jobs resource. See
     * // (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad)
     * // for a full list of supported values.
     * //-
     * const metadata = {
     *   encoding: 'ISO-8859-1',
     *   sourceFormat: 'NEWLINE_DELIMITED_JSON'
     * };
     *
     * table.createLoadJob('./my-data.csv', metadata, callback);
     *
     * //-
     * // Load data from a file in your Cloud Storage bucket.
     * //-
     * const storage = new Storage({
     *   projectId: 'grape-spaceship-123'
     * });
     * const data = storage.bucket('institutions').file('data.csv');
     * table.createLoadJob(data, callback);
     *
     * //-
     * // Load data from multiple files in your Cloud Storage bucket(s).
     * //-
     * table.createLoadJob([
     *   storage.bucket('institutions').file('2011.csv'),
     *   storage.bucket('institutions').file('2012.csv')
     * ], callback);
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.createLoadJob(data).then((data) => {
     *   const job = data[0];
     *   const apiResponse = data[1];
     * });
     * ```
     */
    createLoadJob(source: string | File | File[], metadata?: JobLoadMetadata): Promise<JobResponse>;
    createLoadJob(source: string | File | File[], metadata: JobLoadMetadata, callback: JobCallback): void;
    createLoadJob(source: string | File | File[], callback: JobCallback): void;
    /**
     * @param {string | File | File[]} source
     * @param {JobLoadMetadata} metadata
     * @returns {Promise<JobResponse>}
     * @private
     */
    _createLoadJob(source: string | File | File[], metadata: JobLoadMetadata): Promise<JobResponse>;
    /**
     * Run a query as a job. No results are immediately returned. Instead, your
     * callback will be executed with a {@link Job} object that you must
     * poll for the results. See the Job documentation for explanations of how to
     * check on the status of the job.
     *
     * See {@link BigQuery#createQueryJob} for full documentation of this method.
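     *
     * A minimal sketch (the SQL string is a placeholder):
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * table.createQueryJob({
     *   query: 'SELECT * FROM `my-dataset.my-table` LIMIT 10'
     * }).then(([job]) => {
     *   // Poll the job for its results.
     *   return job.getQueryResults();
     * }).then(([rows]) => {
     *   // `rows` contains the query results.
     * });
     * ```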
     */
    createQueryJob(options: Query): Promise<JobResponse>;
    createQueryJob(options: Query, callback: JobCallback): void;
    /**
     * Run a query scoped to your dataset as a readable object stream.
     *
     * See {@link BigQuery#createQueryStream} for full documentation of this
     * method.
     *
     * @param {object} query See {@link BigQuery#createQueryStream} for full
     *     documentation of this method.
     * @returns {stream} See {@link BigQuery#createQueryStream} for full
     *     documentation of this method.
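     *
     * @example
     * ```
     * // A minimal sketch; the SQL string is a placeholder.
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * table.createQueryStream({
     *   query: 'SELECT * FROM `my-dataset.my-table`'
     * })
     *   .on('error', console.error)
     *   .on('data', (row) => {
     *     // `row` is one result row.
     *   })
     *   .on('end', () => {
     *     // All result rows have been retrieved.
     *   });
     * ```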
     */
    createQueryStream(query: Query): Duplex;
    /**
     * Creates a write stream. Unlike the public version, this will not
     * automatically poll the underlying job.
     *
     * @private
     *
     * @param {string|object} [metadata] Metadata to set with the load operation.
     *     The metadata object should be in the format of the
     *     {@link https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad| `configuration.load`}
     * property of a Jobs resource. If a string is given, it will be used
     * as the filetype.
     * @param {string} [metadata.jobId] Custom job id.
     * @param {string} [metadata.jobPrefix] Prefix to apply to the job id.
     * @returns {WritableStream}
     */
    createWriteStream_(metadata: JobLoadMetadata | string): Writable;
    /**
     * Load data into your table from a readable stream of AVRO, CSV, JSON, ORC,
     * or PARQUET data.
     *
     * See {@link https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert| Jobs: insert API Documentation}
     *
     * @param {string|object} [metadata] Metadata to set with the load operation.
     *     The metadata object should be in the format of the
     *     {@link https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad| `configuration.load`}
     * property of a Jobs resource. If a string is given,
     * it will be used as the filetype.
     * @param {string} [metadata.jobId] Custom job id.
     * @param {string} [metadata.jobPrefix] Prefix to apply to the job id.
     * @returns {WritableStream}
     *
     * @throws {Error} If source format isn't recognized.
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * //-
     * // Load data from a CSV file.
     * //-
     * const request = require('request');
     *
     * const csvUrl = 'http://goo.gl/kSE7z6';
     *
     * const metadata = {
     *   allowJaggedRows: true,
     *   skipLeadingRows: 1
     * };
     *
     * request.get(csvUrl)
     *   .pipe(table.createWriteStream(metadata))
     *   .on('job', (job) => {
     *     // `job` is a Job object that can be used to check the status of the
     *     // request.
     *   })
     *   .on('complete', (job) => {
     *     // The job has completed successfully.
     *   });
     *
     * //-
     * // Load data from a JSON file.
     * //-
     * const fs = require('fs');
     *
     * fs.createReadStream('./test/testdata/testfile.json')
     *   .pipe(table.createWriteStream('json'))
     *   .on('job', (job) => {
     *     // `job` is a Job object that can be used to check the status of the
     *     // request.
     *   })
     *   .on('complete', (job) => {
     *     // The job has completed successfully.
     *   });
     * ```
     */
    createWriteStream(metadata: JobLoadMetadata | string): Writable;
    /**
     * Export table to Cloud Storage.
     *
     * @param {string|File} destination Where the file should be exported
     *     to. A string or a {@link
     * https://googleapis.dev/nodejs/storage/latest/File.html File}.
     * @param {object} [options] The configuration object.
     * @param {string} [options.format="CSV"] The format to export the data in.
     *     Allowed options are "AVRO", "CSV", "JSON", "ORC" or "PARQUET".
     * @param {boolean} [options.gzip] Specify if you would like the file compressed
     *     with GZIP. Default: false.
     * @param {string} [options.jobId] Custom id for the underlying job.
     * @param {string} [options.jobPrefix] Prefix to apply to the underlying job id.
     * @param {JobMetadataCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<JobMetadataResponse>}
     *
     * @throws {Error} If destination isn't a File object.
     * @throws {Error} If destination format isn't recognized.
     *
     * @example
     * ```
     * const {Storage} = require('@google-cloud/storage');
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * const storage = new Storage({
     *   projectId: 'grape-spaceship-123'
     * });
     * const extractedFile = storage.bucket('institutions').file('2014.csv');
     *
     * //-
     * // To use the default options, just pass a
     * // {@link https://googleapis.dev/nodejs/storage/latest/File.html| File}
     * // object.
     * //
     * // Note: The exported format type will be inferred from the file's extension.
     * // If you wish to override this, or provide an array of destination files,
     * // you must provide an `options` object.
     * //-
     * table.extract(extractedFile, (err, apiResponse) => {});
     *
     * //-
     * // If you need more customization, pass an `options` object.
     * //-
     * const options = {
     *   format: 'json',
     *   gzip: true
     * };
     *
     * table.extract(extractedFile, options, (err, apiResponse) => {});
     *
     * //-
     * // You can also specify multiple destination files.
     * //-
     * table.extract([
     *   storage.bucket('institutions').file('2014.json'),
     *   storage.bucket('institutions-copy').file('2014.json')
     * ], options, (err, apiResponse) => {});
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.extract(extractedFile, options).then((data) => {
     *   const apiResponse = data[0];
     * });
     * ```
     */
    extract(destination: File, options?: CreateExtractJobOptions): Promise<JobMetadataResponse>;
    extract(destination: File, options: CreateExtractJobOptions, callback?: JobMetadataCallback): void;
    extract(destination: File, callback?: JobMetadataCallback): void;
    /**
     * @callback RowsCallback
     * @param {?Error} err Request error, if any.
     * @param {array} rows The rows.
     * @param {object} apiResponse The full API response.
     */
    /**
     * @typedef {array} RowsResponse
     * @property {array} 0 The rows.
     */
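    /**
     * Retrieve the rows of the table.
     *
     * See {@link https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list| Tabledata: list API Documentation}
     *
     * A minimal sketch of both calling styles (dataset and table names are
     * placeholders):
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * table.getRows((err, rows) => {
     *   if (!err) {
     *     // `rows` is an array of row objects.
     *   }
     * });
     *
     * // If the callback is omitted, we'll return a Promise.
     * table.getRows().then(([rows]) => {});
     * ```
     */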
    getRows(options?: GetRowsOptions): Promise<RowsResponse>;
    getRows(options: GetRowsOptions, callback: RowsCallback): void;
    getRows(callback: RowsCallback): void;
    /**
     * @callback InsertRowsCallback
     * @param {?Error} err Request error, if any.
     * @param {object[]} err.errors If present, these represent partial
     *     failures. It's possible for part of your request to be completed
     *     successfully, while the other part was not.
     * @param {object} apiResponse The full API response.
     */
    /**
     * @typedef {array} InsertRowsResponse
     * @property {object} 0 The full API response.
     */
    /**
     * Stream data into BigQuery one record at a time without running a load job.
     *
     * If you need to create an entire table from a file, consider using
     * {@link Table#load} instead.
     *
     * Note: If a table was recently created, inserts may fail until the table
     * is consistent within BigQuery. If a `schema` is supplied, this method will
     * automatically retry those failed inserts, and it will even create the
     * table with the provided schema if it does not exist.
     *
     * See {@link https://cloud.google.com/bigquery/docs/reference/v2/tabledata/insertAll| Tabledata: insertAll API Documentation}
     * See {@link https://cloud.google.com/bigquery/quotas#streaming_inserts| Streaming Insert Limits}
     * See {@link https://developers.google.com/bigquery/troubleshooting-errors| Troubleshooting Errors}
     *
     * @param {object|object[]} rows The rows to insert into the table.
     * @param {object} [options] Configuration object.
     * @param {boolean} [options.createInsertId=true] Automatically insert a
     *     default row id when one is not provided.
     * @param {boolean} [options.ignoreUnknownValues=false] Accept rows that contain
     *     values that do not match the schema. The unknown values are ignored.
     * @param {number} [options.partialRetries=3] Number of times to retry
     *     inserting rows for cases of partial failures.
     * @param {boolean} [options.raw] If `true`, the `rows` argument is expected to
     *     be formatted according to the
     *     {@link https://cloud.google.com/bigquery/docs/reference/v2/tabledata/insertAll| specification}.
     * @param {string|object} [options.schema] If provided, will automatically
     *     create a table if it doesn't already exist. Note that this can take
     *     longer than 2 minutes to complete. A comma-separated list of
     *     name:type pairs.
     *     Valid types are "string", "integer", "float", "boolean", and
     *     "timestamp". If the type is omitted, it is assumed to be "string".
     *     Example: "name:string, age:integer". Schemas can also be specified as a
     *     JSON array of fields, which allows for nested and repeated fields. See
     *     a {@link http://goo.gl/sl8Dmg| Table resource} for more detailed information.
     * @param {boolean} [options.skipInvalidRows=false] Insert all valid rows of a
     *     request, even if invalid rows exist.
     * @param {string} [options.templateSuffix] Treat the destination table as a
     *     base template, and insert the rows into an instance table named
     *     "{destination}{templateSuffix}". BigQuery will manage creation of
     *     the instance table, using the schema of the base template table. See
     *     {@link https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables| Automatic table creation using template tables}
     *     for considerations when working with template tables.
     * @param {InsertRowsCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request.
     * @param {object[]} callback.err.errors If present, these represent partial
     *     failures. It's possible for part of your request to be completed
     *     successfully, while the other part was not.
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<InsertRowsResponse>}
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * //-
     * // Insert a single row.
     * //-
     * table.insert({
     *   INSTNM: 'Motion Picture Institute of Michigan',
     *   CITY: 'Troy',
     *   STABBR: 'MI'
     * }, insertHandler);
     *
     * //-
     * // Insert multiple rows at a time.
     * //-
     * const rows = [
     *   {
     *     INSTNM: 'Motion Picture Institute of Michigan',
     *     CITY: 'Troy',
     *     STABBR: 'MI'
     *   },
     *   // ...
     * ];
     *
     * table.insert(rows, insertHandler);
     *
     * //-
     * // Insert a row according to the <a href="https://cloud.google.com/bigquery/docs/reference/v2/tabledata/insertAll">specification</a>.
     * //-
     * const row = {
     *   insertId: '1',
     *   json: {
     *     INSTNM: 'Motion Picture Institute of Michigan',
     *     CITY: 'Troy',
     *     STABBR: 'MI'
     *   }
     * };
     *
     * const options = {
     *   raw: true
     * };
     *
     * table.insert(row, options, insertHandler);
     *
     * //-
     * // Handling the response. See <a href="https://developers.google.com/bigquery/troubleshooting-errors">Troubleshooting Errors</a> for best practices on how to handle errors.
     * //-
     * function insertHandler(err, apiResponse) {
     *   if (err) {
     *     // An API error or partial failure occurred.
     *
     *     if (err.name === 'PartialFailureError') {
     *       // Some rows failed to insert, while others may have succeeded.
     *
     *       // err.errors (object[]):
     *       // err.errors[].row (original row object passed to `insert`)
     *       // err.errors[].errors[].reason
     *       // err.errors[].errors[].message
     *     }
     *   }
     * }
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.insert(rows)
     *   .then((data) => {
     *     const apiResponse = data[0];
     *   })
     *   .catch((err) => {
     *     // An API error or partial failure occurred.
     *
     *     if (err.name === 'PartialFailureError') {
     *       // Some rows failed to insert, while others may have succeeded.
     *
     *       // err.errors (object[]):
     *       // err.errors[].row (original row object passed to `insert`)
     *       // err.errors[].errors[].reason
     *       // err.errors[].errors[].message
     *     }
     *   });
     * ```
     */
    insert(rows: RowMetadata | RowMetadata[], options?: InsertRowsOptions): Promise<InsertRowsResponse>;
    insert(rows: RowMetadata | RowMetadata[], options: InsertRowsOptions, callback: InsertRowsCallback): void;
    insert(rows: RowMetadata | RowMetadata[], callback: InsertRowsCallback): void;
    /**
     * Insert rows with retries, creating the table if it does not exist.
     *
     * @param {RowMetadata | RowMetadata[]} rows
     * @param {InsertRowsOptions} options
     * @returns {Promise<bigquery.ITableDataInsertAllResponse | bigquery.ITable>}
     * @private
     */
    private _insertAndCreateTable;
    /**
     * This method will attempt to insert rows while retrying any partial failures
     * that occur along the way. Because partial insert failures are returned
     * differently, we can't depend on our usual retry strategy.
     *
     * @private
     *
     * @param {RowMetadata|RowMetadata[]} rows The rows to insert.
     * @param {InsertRowsOptions} options Insert options.
     * @returns {Promise<bigquery.ITableDataInsertAllResponse>}
     */
    private _insertWithRetry;
    /**
     * This method does the bulk of the work for processing options and making the
     * network request.
     *
     * @private
     *
     * @param {RowMetadata|RowMetadata[]} rows The rows to insert.
     * @param {InsertRowsOptions} options Insert options.
     * @returns {Promise<bigquery.ITableDataInsertAllResponse>}
     */
    private _insert;
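    /**
     * Create a writable stream that inserts rows into the table via the
     * streaming insert API, batching writes according to `batchOptions`.
     *
     * A minimal sketch; the batch thresholds below are illustrative only:
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * const stream = table.createInsertStream({
     *   batchOptions: {
     *     maxBytes: 1024 * 1024, // flush a batch once it reaches 1 MiB...
     *     maxRows: 500,          // ...or 500 rows...
     *     maxMilliseconds: 1000  // ...or after one second.
     *   }
     * });
     *
     * stream.write({name: 'Ada', age: 36});
     * stream.end();
     * ```
     */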
    createInsertStream(options?: InsertStreamOptions): Writable;
    /**
     * Load data from a local file or Storage {@link
     * https://googleapis.dev/nodejs/storage/latest/File.html File}.
     *
     * By loading data this way, you create a load job that will run your data
     * load asynchronously. If you would like instantaneous access to your data,
     * insert it using {@link Table#insert}.
     *
     * Note: The file type will be inferred from the given file's extension. If you
     * wish to override this, you must provide `metadata.format`.
     *
     * @param {string|File} source The source file to load. A filepath as a string
     *     or a {@link
     * https://googleapis.dev/nodejs/storage/latest/File.html File}
     * object.
     * @param {object} [metadata] Metadata to set with the load operation. The
     *     metadata object should be in the format of the
     *     {@link https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad| `configuration.load`}
     * property of a Jobs resource.
     * @param {string} [metadata.format] The format the data being loaded is in.
     *     Allowed options are "AVRO", "CSV", "JSON", "ORC", or "PARQUET".
     * @param {string} [metadata.jobId] Custom id for the underlying job.
     * @param {string} [metadata.jobPrefix] Prefix to apply to the underlying job
     *     id.
     * @param {JobMetadataCallback} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<JobMetadataResponse>}
     *
     * @throws {Error} If the source isn't a string file name or a File instance.
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * //-
     * // Load data from a local file.
     * //-
     * table.load('./institutions.csv', (err, apiResponse) => {});
     *
     * //-
     * // You may also pass in metadata in the format of a Jobs resource. See
     * // (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad)
     * // for a full list of supported values.
     * //-
     * const metadata = {
     *   encoding: 'ISO-8859-1',
     *   sourceFormat: 'NEWLINE_DELIMITED_JSON'
     * };
     *
     * table.load('./my-data.csv', metadata, (err, apiResponse) => {});
     *
     * //-
     * // Load data from a file in your Cloud Storage bucket.
     * //-
     * const {Storage} = require('@google-cloud/storage');
     * const storage = new Storage({
     *   projectId: 'grape-spaceship-123'
     * });
     * const data = storage.bucket('institutions').file('data.csv');
     * table.load(data, (err, apiResponse) => {});
     *
     * //-
     * // Load data from multiple files in your Cloud Storage bucket(s).
     * //-
     * table.load([
     *   storage.bucket('institutions').file('2011.csv'),
     *   storage.bucket('institutions').file('2012.csv')
     * ], (err, apiResponse) => {});
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.load(data).then((data) => {
     *   const apiResponse = data[0];
     * });
     * ```
     */
    load(source: string | File | File[], metadata?: JobLoadMetadata): Promise<JobMetadataResponse>;
    load(source: string | File | File[], metadata: JobLoadMetadata, callback: JobMetadataCallback): void;
    load(source: string | File | File[], callback: JobMetadataCallback): void;
    /**
     * Run a query scoped to your dataset.
     *
     * See {@link BigQuery#query} for full documentation of this method.
     * @param {object} query See {@link BigQuery#query} for full documentation of this method.
     * @param {function} [callback] See {@link BigQuery#query} for full documentation of this method.
     * @returns {Promise<SimpleQueryRowsResponse>}
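     *
     * @example
     * ```
     * // A minimal sketch; the SQL string is a placeholder.
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * table.query('SELECT * FROM `my-dataset.my-table` LIMIT 10')
     *   .then(([rows]) => {
     *     // `rows` is an array of query result rows.
     *   });
     * ```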
     */
    query(query: Query): Promise<SimpleQueryRowsResponse>;
    query(query: string): Promise<SimpleQueryRowsResponse>;
    query(query: Query, callback: SimpleQueryRowsCallback): void;
    /**
     * Set the metadata on the table.
     *
     * See {@link https://cloud.google.com/bigquery/docs/reference/v2/tables/patch| Tables: patch API Documentation}
     *
     * @param {object} metadata The metadata key/value object to set.
     * @param {string} metadata.description A user-friendly description of the
     *     table.
     * @param {string} metadata.name A descriptive name for the table.
     * @param {string|object} metadata.schema A comma-separated list of name:type
     *     pairs. Valid types are "string", "integer", "float", "boolean",
     *     "bytes", "record", and "timestamp". If the type is omitted, it is
     *     assumed to be "string". Example: "name:string, age:integer". Schemas
     *     can also be specified as a JSON array of fields, which allows for
     *     nested and repeated fields. See a
     *     {@link http://goo.gl/sl8Dmg| Table resource} for more detailed
     *     information.
     * @param {function} [callback] The callback function.
     * @param {?error} callback.err An error returned while making this request.
     * @param {object} callback.apiResponse The full API response.
     * @returns {Promise<common.SetMetadataResponse>}
     *
     * @example
     * ```
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const dataset = bigquery.dataset('my-dataset');
     * const table = dataset.table('my-table');
     *
     * const metadata = {
     *   name: 'My recipes',
     *   description: 'A table for storing my recipes.',
     *   schema: 'name:string, servings:integer, cookingTime:float, quick:boolean'
     * };
     *
     * table.setMetadata(metadata, (err, metadata, apiResponse) => {});
     *
     * //-
     * // If the callback is omitted, we'll return a Promise.
     * //-
     * table.setMetadata(metadata).then((data) => {
     *   const metadata = data[0];
     *   const apiResponse = data[1];
     * });
     * ```
     */
    setMetadata(metadata: SetTableMetadataOptions): Promise<SetMetadataResponse>;
    setMetadata(metadata: SetTableMetadataOptions, callback: ResponseCallback): void;
    /**
     * Get the IAM access control policy for the table.
     * @returns {Promise<PolicyResponse>}
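     *
     * @example
     * ```
     * // A minimal sketch; dataset and table names are placeholders.
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * table.getIamPolicy().then(([policy]) => {
     *   // `policy` is the table's current IAM policy.
     * });
     * ```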
     */
    getIamPolicy(optionsOrCallback?: GetPolicyOptions | PolicyCallback): Promise<PolicyResponse>;
    getIamPolicy(options: GetPolicyOptions, callback: PolicyCallback): void;
    /**
     * Set the IAM access control policy for the table.
     * @returns {Promise<PolicyResponse>}
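     *
     * @example
     * ```
     * // A minimal sketch: read the current policy, add a binding, and write
     * // it back. The role and member below are placeholders.
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * table.getIamPolicy().then(([policy]) => {
     *   policy.bindings = policy.bindings || [];
     *   policy.bindings.push({
     *     role: 'roles/bigquery.dataViewer',
     *     members: ['user:jane@example.com'],
     *   });
     *   return table.setIamPolicy(policy);
     * });
     * ```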
     */
    setIamPolicy(policy: Policy, options?: SetPolicyOptions): Promise<PolicyResponse>;
    setIamPolicy(policy: Policy, options: SetPolicyOptions, callback: PolicyCallback): void;
    setIamPolicy(policy: Policy, callback: PolicyCallback): void;
    /**
     * Test whether the caller holds the given IAM permissions on the table.
     * @returns {Promise<PermissionsResponse>}
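     *
     * @example
     * ```
     * // A minimal sketch; `bigquery.tables.get` and `bigquery.tables.getData`
     * // are standard BigQuery table permissions.
     * const {BigQuery} = require('@google-cloud/bigquery');
     * const bigquery = new BigQuery();
     * const table = bigquery.dataset('my-dataset').table('my-table');
     *
     * table
     *   .testIamPermissions(['bigquery.tables.get', 'bigquery.tables.getData'])
     *   .then(([response]) => {
     *     // `response.permissions` lists the subset the caller holds.
     *   });
     * ```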
     */
    testIamPermissions(permissions: string | string[]): Promise<PermissionsResponse>;
    testIamPermissions(permissions: string | string[], callback: PermissionsCallback): void;
}
/**
 * Reference to the {@link Table} class.
 * @name module:@google-cloud/bigquery.Table
 * @see Table
 */
export { Table };
