Repository URL to install this package:
|
Version:
293.0.0-0 ▾
|
{
"kind": "discovery#restDescription",
"discoveryVersion": "v1",
"id": "bigquery:v2",
"name": "bigquery",
"version": "v2",
"revision": "0",
"title": "BigQuery API",
"description": "A data platform for customers to create, manage, share and query data.",
"ownerDomain": "google.com",
"ownerName": "Google",
"icons": {
"x16": "http://www.google.com/images/icons/product/search-16.gif",
"x32": "http://www.google.com/images/icons/product/search-32.gif"
},
"documentationLink": "https://developers.google.com/bigquery/",
"protocol": "rest",
"rootUrl": "https://bigquery.googleapis.com/",
"mtlsRootUrl": "https://bigquery.mtls.googleapis.com/",
"servicePath": "",
"baseUrl": "https://bigquery.googleapis.com/",
"batchPath": "batch",
"parameters": {
"access_token": {
"type": "string",
"description": "OAuth access token.",
"location": "query"
},
"alt": {
"type": "string",
"description": "Data format for response.",
"default": "json",
"enum": [
"json",
"media",
"proto"
],
"enumDescriptions": [
"Responses with Content-Type of application/json",
"Media download with context-dependent Content-Type",
"Responses with Content-Type of application/x-protobuf"
],
"location": "query"
},
"callback": {
"type": "string",
"description": "JSONP",
"location": "query"
},
"fields": {
"type": "string",
"description": "Selector specifying which fields to include in a partial response.",
"location": "query"
},
"key": {
"type": "string",
"description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
"location": "query"
},
"oauth_token": {
"type": "string",
"description": "OAuth 2.0 token for the current user.",
"location": "query"
},
"prettyPrint": {
"type": "boolean",
"description": "Returns response with indentations and line breaks.",
"default": "true",
"location": "query"
},
"quotaUser": {
"type": "string",
"description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
"location": "query"
},
"upload_protocol": {
"type": "string",
"description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
"location": "query"
},
"uploadType": {
"type": "string",
"description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
"location": "query"
},
"$.xgafv": {
"type": "string",
"description": "V1 error format.",
"enum": [
"1",
"2"
],
"enumDescriptions": [
"v1 error format",
"v2 error format"
],
"location": "query"
}
},
"auth": {
"oauth2": {
"scopes": {
"https://www.googleapis.com/auth/bigquery": {
"description": "View and manage your data in Google BigQuery"
},
"https://www.googleapis.com/auth/bigquery.insertdata": {
"description": "Insert data into Google BigQuery"
},
"https://www.googleapis.com/auth/bigquery.readonly": {
"description": "View your data in Google BigQuery"
},
"https://www.googleapis.com/auth/cloud-platform": {
"description": "View and manage your data across Google Cloud Platform services"
},
"https://www.googleapis.com/auth/cloud-platform.read-only": {
"description": "View your data across Google Cloud Platform services"
},
"https://www.googleapis.com/auth/devstorage.full_control": {
"description": "Manage your data and permissions in Google Cloud Storage"
},
"https://www.googleapis.com/auth/devstorage.read_only": {
"description": "View your data in Google Cloud Storage"
},
"https://www.googleapis.com/auth/devstorage.read_write": {
"description": "Manage your data in Google Cloud Storage"
}
}
}
},
"schemas": {
"Dataset": {
"id": "Dataset",
"type": "object",
"properties": {
"kind": {
"description": "Output only. The resource type.",
"type": "string"
},
"etag": {
"description": "Output only. A hash of the resource.",
"type": "string"
},
"id": {
"description": "Output only. The fully-qualified unique name of the dataset in the format\nprojectId:datasetId. The dataset name without the project name is given in\nthe datasetId field. When creating a new dataset, leave this field blank,\nand instead specify the datasetId field.",
"type": "string"
},
"selfLink": {
"description": "Output only. A URL that can be used to access the resource again. You can\nuse this URL in Get or Update requests to the resource.",
"type": "string"
},
"datasetReference": {
"description": "Required. A reference that identifies the dataset.",
"$ref": "DatasetReference"
},
"friendlyName": {
"description": "Optional. A descriptive name for the dataset.",
"type": "string"
},
"description": {
"description": "Optional. A user-friendly description of the dataset.",
"type": "string"
},
"defaultTableExpirationMs": {
"description": "Optional. The default lifetime of all tables in the dataset, in milliseconds\nThe minimum value is 3600000 milliseconds (one hour).\nOnce this property is set, all newly-created tables in the dataset will\nhave an expirationTime property set to the creation time plus the value in\nthis property, and changing the value will only affect new tables, not\nexisting ones. When the expirationTime for a given table is reached, that\ntable will be deleted automatically.\nIf a table's expirationTime is modified or removed before the table\nexpires, or if you provide an explicit expirationTime when creating a\ntable, that value takes precedence over the default expiration time\nindicated by this property.",
"type": "string",
"format": "int64"
},
"defaultPartitionExpirationMs": {
"description": "This default partition expiration, expressed in milliseconds.\n\nWhen new time-partitioned tables are created in a dataset where this\nproperty is set, the table will inherit this value, propagated as the\n`TimePartitioning.expirationMs` property on the new table. If you set\n`TimePartitioning.expirationMs` explicitly when creating a table,\nthe `defaultPartitionExpirationMs` of the containing dataset is ignored.\n\nWhen creating a partitioned table, if `defaultPartitionExpirationMs`\nis set, the `defaultTableExpirationMs` value is ignored and the table\nwill not be inherit a table expiration deadline.",
"type": "string",
"format": "int64"
},
"labels": {
"description": "The labels associated with this dataset. You can use these\nto organize and group your datasets.\nYou can set this property when inserting or updating a dataset.\nSee <a\nhref=\"\/bigquery\/docs\/creating-managing-labels#creating_and_updating_dataset_labels\">Creating\nand Updating Dataset Labels<\/a> for more information.",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"access": {
"description": "Optional. An array of objects that define dataset access for one or more\nentities. You can set this property when inserting or updating a dataset in\norder to control who is allowed to access the data. If unspecified at\ndataset creation time, BigQuery adds default dataset access for the\nfollowing entities: access.specialGroup: projectReaders; access.role:\nREADER; access.specialGroup: projectWriters; access.role: WRITER;\naccess.specialGroup: projectOwners; access.role: OWNER;\naccess.userByEmail: [dataset creator email]; access.role: OWNER;",
"type": "array",
"items": {
"description": "An object that defines dataset access for an entity.",
"type": "object",
"properties": {
"role": {
"description": "Required. An IAM role ID that should be granted to the user, group,\nor domain specified in this access entry.\nThe following legacy mappings will be applied:\n OWNER <=> roles\/bigquery.dataOwner\n WRITER <=> roles\/bigquery.dataEditor\n READER <=> roles\/bigquery.dataViewer\nThis field will accept any of the above formats, but will return only\nthe legacy format. For example, if you set this field to\n\"roles\/bigquery.dataOwner\", it will be returned back as \"OWNER\".",
"type": "string"
},
"userByEmail": {
"description": "[Pick one] An email address of a user to grant access to. For example:\nfred@example.com. Maps to IAM policy member \"user:EMAIL\" or\n\"serviceAccount:EMAIL\".",
"type": "string"
},
"groupByEmail": {
"description": "[Pick one] An email address of a Google Group to grant access to.\nMaps to IAM policy member \"group:GROUP\".",
"type": "string"
},
"domain": {
"description": "[Pick one] A domain to grant access to. Any users signed in with the domain\nspecified will be granted the specified access. Example: \"example.com\".\nMaps to IAM policy member \"domain:DOMAIN\".",
"type": "string"
},
"specialGroup": {
"description": "[Pick one] A special group to grant access to. Possible values include:\n projectOwners: Owners of the enclosing project.\n projectReaders: Readers of the enclosing project.\n projectWriters: Writers of the enclosing project.\n allAuthenticatedUsers: All authenticated BigQuery users.\nMaps to similarly-named IAM members.",
"type": "string"
},
"iamMember": {
"description": "[Pick one] Some other type of member that appears in the IAM Policy but\nisn't a user, group, domain, or special group.",
"type": "string"
},
"view": {
"description": "[Pick one] A view from a different dataset to grant access to. Queries\nexecuted against that view will have read access to views\/tables\/routines\nin this dataset.\nThe role field is not required when this field is set. If that view is\nupdated by any user, access to the view needs to be granted again via an\nupdate operation.",
"$ref": "TableReference"
}
}
}
},
"creationTime": {
"description": "Output only. The time when this dataset was created, in milliseconds since the epoch.",
"type": "string",
"format": "int64"
},
"lastModifiedTime": {
"description": "Output only. The date when this dataset or any of its tables was last modified, in\nmilliseconds since the epoch.",
"type": "string",
"format": "int64"
},
"location": {
"description": "The geographic location where the dataset should reside. Possible\nvalues include EU and US. The default value is US.",
"type": "string"
},
"defaultEncryptionConfiguration": {
"description": "The default encryption key for all tables in the dataset.\nOnce this property is set, all newly-created partitioned tables in the\ndataset will have encryption key set to this value, unless table creation\nrequest (or query) overrides the key.",
"$ref": "EncryptionConfiguration"
}
}
},
"DatasetReference": {
"id": "DatasetReference",
"type": "object",
"properties": {
"datasetId": {
"description": "Required. A unique ID for this dataset, without the project name. The ID\nmust contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).\nThe maximum length is 1,024 characters.",
"type": "string"
},
"projectId": {
"description": "Optional. The ID of the project containing this dataset.",
"type": "string"
}
}
},
"TableReference": {
"id": "TableReference",
"type": "object",
"properties": {
"projectId": {
"description": "Required. The ID of the project containing this table.",
"type": "string"
},
"datasetId": {
"description": "Required. The ID of the dataset containing this table.",
"type": "string"
},
"tableId": {
"description": "Required. The ID of the table. The ID must contain only\nletters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum\nlength is 1,024 characters. Certain operations allow\nsuffixing of the table ID with a partition decorator, such as\n`sample_table$20190123`.",
"type": "string"
}
}
},
"EncryptionConfiguration": {
"id": "EncryptionConfiguration",
"type": "object",
"properties": {
"kmsKeyName": {
"description": "Optional. Describes the Cloud KMS encryption key that will be used to\nprotect destination BigQuery table. The BigQuery Service Account associated\nwith your project requires access to this encryption key.",
"type": "string"
}
}
},
"DatasetList": {
"id": "DatasetList",
"type": "object",
"properties": {
"kind": {
"description": "Output only. The resource type.\nThis property always returns the value \"bigquery#datasetList\"",
"type": "string"
},
"etag": {
"description": "Output only. A hash value of the results page. You can use this property to\ndetermine if the page has changed since the last request.",
"type": "string"
},
"nextPageToken": {
"description": "A token that can be used to request the next results page. This property is\nomitted on the final results page.",
"type": "string"
},
"datasets": {
"description": "An array of the dataset resources in the project.\nEach resource contains basic information.\nFor full information about a particular dataset resource, use the Datasets:\nget method. This property is omitted when there are no datasets in the\nproject.",
"type": "array",
"items": {
"description": "A dataset resource with only a subset of fields, to be returned in a list of\ndatasets.",
"type": "object",
"properties": {
"kind": {
"description": "The resource type.\nThis property always returns the value \"bigquery#dataset\"",
"type": "string"
},
"id": {
"description": "The fully-qualified, unique, opaque ID of the dataset.",
"type": "string"
},
"datasetReference": {
"description": "The dataset reference.\nUse this property to access specific parts of the dataset's ID, such as\nproject ID or dataset ID.",
"$ref": "DatasetReference"
},
"labels": {
"description": "The labels associated with this dataset.\nYou can use these to organize and group your datasets.",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"friendlyName": {
"description": "An alternate name for the dataset. The friendly name is purely\ndecorative in nature.",
"type": "string"
},
"location": {
"description": "The geographic location where the dataset resides.",
"type": "string"
}
}
}
}
}
},
"Model": {
"id": "Model",
"type": "object",
"properties": {
"etag": {
"description": "Output only. A hash of this resource.",
"type": "string"
},
"modelReference": {
"description": "Required. Unique identifier for this model.",
"$ref": "ModelReference"
},
"creationTime": {
"description": "Output only. The time when this model was created, in millisecs since the epoch.",
"type": "string",
"format": "int64"
},
"lastModifiedTime": {
"description": "Output only. The time when this model was last modified, in millisecs since the epoch.",
"type": "string",
"format": "int64"
},
"description": {
"description": "Optional. A user-friendly description of this model.",
"type": "string"
},
"friendlyName": {
"description": "Optional. A descriptive name for this model.",
"type": "string"
},
"labels": {
"description": "The labels associated with this model. You can use these to organize\nand group your models. Label keys and values can be no longer\nthan 63 characters, can only contain lowercase letters, numeric\ncharacters, underscores and dashes. International characters are allowed.\nLabel values are optional. Label keys must start with a letter and each\nlabel in the list must have a different key.",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"expirationTime": {
"description": "Optional. The time when this model expires, in milliseconds since the epoch.\nIf not present, the model will persist indefinitely. Expired models\nwill be deleted and their storage reclaimed. The defaultTableExpirationMs\nproperty of the encapsulating dataset can be used to set a default\nexpirationTime on newly created models.",
"type": "string",
"format": "int64"
},
"location": {
"description": "Output only. The geographic location where the model resides. This value\nis inherited from the dataset.",
"type": "string"
},
"encryptionConfiguration": {
"description": "Custom encryption configuration (e.g., Cloud KMS keys). This shows the\nencryption configuration of the model data while stored in BigQuery\nstorage. This field can be used with PatchModel to update encryption key\nfor an already encrypted model.",
"$ref": "EncryptionConfiguration"
},
"modelType": {
"description": "Output only. Type of the model resource.",
"enumDescriptions": [
"",
"Linear regression model.",
"Logistic regression based classification model.",
"K-means clustering model.",
"Matrix factorization model.",
"DNN classifier model.",
"[Beta] An imported TensorFlow model.",
"DNN regressor model.",
"Boosted tree regressor model.",
"Boosted tree classifier model.",
"AutoML Tables regression model.",
"AutoML Tables classification model."
],
"type": "string",
"enum": [
"MODEL_TYPE_UNSPECIFIED",
"LINEAR_REGRESSION",
"LOGISTIC_REGRESSION",
"KMEANS",
"MATRIX_FACTORIZATION",
"DNN_CLASSIFIER",
"TENSORFLOW",
"DNN_REGRESSOR",
"BOOSTED_TREE_REGRESSOR",
"BOOSTED_TREE_CLASSIFIER",
"AUTOML_REGRESSOR",
"AUTOML_CLASSIFIER"
]
},
"trainingRuns": {
"description": "Output only. Information for all training runs in increasing order of start_time.",
"type": "array",
"items": {
"$ref": "TrainingRun"
}
},
"featureColumns": {
"description": "Output only. Input feature columns that were used to train this model.",
"type": "array",
"items": {
"$ref": "StandardSqlField"
}
},
"labelColumns": {
"description": "Output only. Label columns that were used to train this model.\nThe output of the model will have a \"predicted_\" prefix to these columns.",
"type": "array",
"items": {
"$ref": "StandardSqlField"
}
}
}
},
"ModelReference": {
"id": "ModelReference",
"description": "Id path of a model.",
"type": "object",
"properties": {
"projectId": {
"description": "Required. The ID of the project containing this model.",
"type": "string"
},
"datasetId": {
"description": "Required. The ID of the dataset containing this model.",
"type": "string"
},
"modelId": {
"description": "Required. The ID of the model. The ID must contain only\nletters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum\nlength is 1,024 characters.",
"type": "string"
}
}
},
"TrainingRun": {
"id": "TrainingRun",
"description": "Information about a single training query run for the model.",
"type": "object",
"properties": {
"trainingOptions": {
"description": "Options that were used for this training run, includes\nuser specified and default options that were used.",
"$ref": "TrainingOptions"
},
"startTime": {
"description": "The start time of this training run.",
"type": "string",
"format": "google-datetime"
},
"results": {
"description": "Output of each iteration run, results.size() <= max_iterations.",
"type": "array",
"items": {
"$ref": "IterationResult"
}
},
"evaluationMetrics": {
"description": "The evaluation metrics over training\/eval data that were computed at the\nend of training.",
"$ref": "EvaluationMetrics"
},
"dataSplitResult": {
"description": "Data split result of the training run. Only set when the input data is\nactually split.",
"$ref": "DataSplitResult"
}
}
},
"TrainingOptions": {
"id": "TrainingOptions",
"type": "object",
"properties": {
"maxIterations": {
"description": "The maximum number of iterations in training. Used only for iterative\ntraining algorithms.",
"type": "string",
"format": "int64"
},
"lossType": {
"description": "Type of loss function used during training run.",
"enumDescriptions": [
"",
"Mean squared loss, used for linear regression.",
"Mean log loss, used for logistic regression."
],
"type": "string",
"enum": [
"LOSS_TYPE_UNSPECIFIED",
"MEAN_SQUARED_LOSS",
"MEAN_LOG_LOSS"
]
},
"learnRate": {
"description": "Learning rate in training. Used only for iterative training algorithms.",
"type": "number",
"format": "double"
},
"l1Regularization": {
"description": "L1 regularization coefficient.",
"type": "number",
"format": "double"
},
"l2Regularization": {
"description": "L2 regularization coefficient.",
"type": "number",
"format": "double"
},
"minRelativeProgress": {
"description": "When early_stop is true, stops training when accuracy improvement is\nless than 'min_relative_progress'. Used only for iterative training\nalgorithms.",
"type": "number",
"format": "double"
},
"warmStart": {
"description": "Whether to train a model from the last checkpoint.",
"type": "boolean"
},
"earlyStop": {
"description": "Whether to stop early when the loss doesn't improve significantly\nany more (compared to min_relative_progress). Used only for iterative\ntraining algorithms.",
"type": "boolean"
},
"inputLabelColumns": {
"description": "Name of input label columns in training data.",
"type": "array",
"items": {
"type": "string"
}
},
"dataSplitMethod": {
"description": "The data split type for training and evaluation, e.g. RANDOM.",
"enumDescriptions": [
"",
"Splits data randomly.",
"Splits data with the user provided tags.",
"Splits data sequentially.",
"Data split will be skipped.",
"Splits data automatically: Uses NO_SPLIT if the data size is small.\nOtherwise uses RANDOM."
],
"type": "string",
"enum": [
"DATA_SPLIT_METHOD_UNSPECIFIED",
"RANDOM",
"CUSTOM",
"SEQUENTIAL",
"NO_SPLIT",
"AUTO_SPLIT"
]
},
"dataSplitEvalFraction": {
"description": "The fraction of evaluation data over the whole input data. The rest\nof data will be used as training data. The format should be double.\nAccurate to two decimal places.\nDefault value is 0.2.",
"type": "number",
"format": "double"
},
"dataSplitColumn": {
"description": "The column to split data with. This column won't be used as a\nfeature.\n1. When data_split_method is CUSTOM, the corresponding column should\nbe boolean. The rows with true value tag are eval data, and the false\nare training data.\n2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION\nrows (from smallest to largest) in the corresponding column are used\nas training data, and the rest are eval data. It respects the order\nin Orderable data types:\nhttps:\/\/cloud.google.com\/bigquery\/docs\/reference\/standard-sql\/data-types#data-type-properties",
"type": "string"
},
"learnRateStrategy": {
"description": "The strategy to determine learn rate for the current iteration.",
"enumDescriptions": [
"",
"Use line search to determine learning rate.",
"Use a constant learning rate."
],
"type": "string",
"enum": [
"LEARN_RATE_STRATEGY_UNSPECIFIED",
"LINE_SEARCH",
"CONSTANT"
]
},
"initialLearnRate": {
"description": "Specifies the initial learning rate for the line search learn rate\nstrategy.",
"type": "number",
"format": "double"
},
"labelClassWeights": {
"description": "Weights associated with each label class, for rebalancing the\ntraining data. Only applicable for classification models.",
"type": "object",
"additionalProperties": {
"type": "number",
"format": "double"
}
},
"userColumn": {
"description": "User column specified for matrix factorization models.",
"type": "string"
},
"itemColumn": {
"description": "Item column specified for matrix factorization models.",
"type": "string"
},
"distanceType": {
"description": "Distance type for clustering models.",
"enumDescriptions": [
"",
"Eculidean distance.",
"Cosine distance."
],
"type": "string",
"enum": [
"DISTANCE_TYPE_UNSPECIFIED",
"EUCLIDEAN",
"COSINE"
]
},
"numClusters": {
"description": "Number of clusters for clustering models.",
"type": "string",
"format": "int64"
},
"modelUri": {
"description": "[Beta] Google Cloud Storage URI from which the model was imported. Only\napplicable for imported models.",
"type": "string"
},
"optimizationStrategy": {
"description": "Optimization strategy for training linear regression models.",
"enumDescriptions": [
"",
"Uses an iterative batch gradient descent algorithm.",
"Uses a normal equation to solve linear regression problem."
],
"type": "string",
"enum": [
"OPTIMIZATION_STRATEGY_UNSPECIFIED",
"BATCH_GRADIENT_DESCENT",
"NORMAL_EQUATION"
]
},
"hiddenUnits": {
"description": "Hidden units for dnn models.",
"type": "array",
"items": {
"type": "string",
"format": "int64"
}
},
"batchSize": {
"description": "Batch size for dnn models.",
"type": "string",
"format": "int64"
},
"dropout": {
"description": "Dropout probability for dnn models.",
"type": "number",
"format": "double"
},
"maxTreeDepth": {
"description": "Maximum depth of a tree for boosted tree models.",
"type": "string",
"format": "int64"
},
"subsample": {
"description": "Subsample fraction of the training data to grow tree to prevent\noverfitting for boosted tree models.",
"type": "number",
"format": "double"
},
"minSplitLoss": {
"description": "Minimum split loss for boosted tree models.",
"type": "number",
"format": "double"
},
"numFactors": {
"description": "Num factors specified for matrix factorization models.",
"type": "string",
"format": "int64"
},
"feedbackType": {
"description": "Feedback type that specifies which algorithm to run for matrix\nfactorization.",
"enumDescriptions": [
"",
"Use weighted-als for implicit feedback problems.",
"Use nonweighted-als for explicit feedback problems."
],
"type": "string",
"enum": [
"FEEDBACK_TYPE_UNSPECIFIED",
"IMPLICIT",
"EXPLICIT"
]
},
"walsAlpha": {
"description": "Hyperparameter for matrix factoration when implicit feedback type is\nspecified.",
"type": "number",
"format": "double"
},
"kmeansInitializationMethod": {
"description": "The method used to initialize the centroids for kmeans algorithm.",
"enumDescriptions": [
"",
"Initializes the centroids randomly.",
"Initializes the centroids using data specified in\nkmeans_initialization_column.",
"Initializes with kmeans++."
],
"type": "string",
"enum": [
"KMEANS_INITIALIZATION_METHOD_UNSPECIFIED",
"RANDOM",
"CUSTOM",
"KMEANS_PLUS_PLUS"
]
},
"kmeansInitializationColumn": {
"description": "The column used to provide the initial centroids for kmeans algorithm\nwhen kmeans_initialization_method is CUSTOM.",
"type": "string"
}
}
},
"IterationResult": {
"id": "IterationResult",
"description": "Information about a single iteration of the training run.",
"type": "object",
"properties": {
"index": {
"description": "Index of the iteration, 0 based.",
"type": "integer",
"format": "int32"
},
"durationMs": {
"description": "Time taken to run the iteration in milliseconds.",
"type": "string",
"format": "int64"
},
"trainingLoss": {
"description": "Loss computed on the training data at the end of iteration.",
"type": "number",
"format": "double"
},
"evalLoss": {
"description": "Loss computed on the eval data at the end of iteration.",
"type": "number",
"format": "double"
},
"learnRate": {
"description": "Learn rate used for this iteration.",
"type": "number",
"format": "double"
},
"clusterInfos": {
"description": "Information about top clusters for clustering models.",
"type": "array",
"items": {
"$ref": "ClusterInfo"
}
},
"arimaResult": {
"$ref": "ArimaResult"
}
}
},
"ClusterInfo": {
"id": "ClusterInfo",
"description": "Information about a single cluster for clustering model.",
"type": "object",
"properties": {
"centroidId": {
"description": "Centroid id.",
"type": "string",
"format": "int64"
},
"clusterRadius": {
"description": "Cluster radius, the average distance from centroid\nto each point assigned to the cluster.",
"type": "number",
"format": "double"
},
"clusterSize": {
"description": "Cluster size, the total number of points assigned to the cluster.",
"type": "string",
"format": "int64"
}
}
},
"ArimaResult": {
"id": "ArimaResult",
"description": "(Auto-)arima fitting result. Wrap everything in ArimaResult for easier\nrefactoring if we want to use model-specific iteration results.",
"type": "object",
"properties": {
"arimaModelInfo": {
"description": "This message is repeated because there are multiple arima models\nfitted in auto-arima. For non-auto-arima model, its size is one.",
"type": "array",
"items": {
"$ref": "ArimaModelInfo"
}
},
"seasonalPeriods": {
"description": "Seasonal periods. Repeated because multiple periods are supported for\none time series.",
"enumDescriptions": [
"",
"No seasonality",
"Daily period, 24 hours.",
"Weekly period, 7 days.",
"Monthly period, 30 days or irregular.",
"Quarterly period, 90 days or irregular.",
"Yearly period, 365 days or irregular."
],
"type": "array",
"items": {
"type": "string",
"enum": [
"SEASONAL_PERIOD_TYPE_UNSPECIFIED",
"NO_SEASONALITY",
"DAILY",
"WEEKLY",
"MONTHLY",
"QUARTERLY",
"YEARLY"
]
}
}
}
},
"ArimaModelInfo": {
"id": "ArimaModelInfo",
"description": "Arima model information.",
"type": "object",
"properties": {
"nonSeasonalOrder": {
"description": "Non-seasonal order.",
"$ref": "ArimaOrder"
},
"arimaCoefficients": {
"description": "Arima coefficients.",
"$ref": "ArimaCoefficients"
},
"arimaFittingMetrics": {
"description": "Arima fitting metrics.",
"$ref": "ArimaFittingMetrics"
},
"hasDrift": {
"description": "Whether Arima model fitted with drift or not. It is always false\nwhen d is not 1.",
"type": "boolean"
},
"timeSeriesId": {
"description": "The id to indicate different time series.",
"type": "string"
},
"seasonalPeriods": {
"description": "Seasonal periods. Repeated because multiple periods are supported\nfor one time series.",
"enumDescriptions": [
"",
"No seasonality",
"Daily period, 24 hours.",
"Weekly period, 7 days.",
"Monthly period, 30 days or irregular.",
"Quarterly period, 90 days or irregular.",
"Yearly period, 365 days or irregular."
],
"type": "array",
"items": {
"type": "string",
"enum": [
"SEASONAL_PERIOD_TYPE_UNSPECIFIED",
"NO_SEASONALITY",
"DAILY",
"WEEKLY",
"MONTHLY",
"QUARTERLY",
"YEARLY"
]
}
}
}
},
"ArimaOrder": {
"id": "ArimaOrder",
"description": "Arima order, can be used for both non-seasonal and seasonal parts.",
"type": "object",
"properties": {
"p": {
"description": "Order of the autoregressive part.",
"type": "string",
"format": "int64"
},
"d": {
"description": "Order of the differencing part.",
"type": "string",
"format": "int64"
},
"q": {
"description": "Order of the moving-average part.",
"type": "string",
"format": "int64"
}
}
},
"ArimaCoefficients": {
"id": "ArimaCoefficients",
"description": "Arima coefficients.",
"type": "object",
"properties": {
"autoRegressiveCoefficients": {
"description": "Auto-regressive coefficients, an array of double.",
"type": "array",
"items": {
"type": "number",
"format": "double"
}
},
"movingAverageCoefficients": {
"description": "Moving-average coefficients, an array of double.",
"type": "array",
"items": {
"type": "number",
"format": "double"
}
},
"interceptCoefficient": {
"description": "Intercept coefficient, just a double not an array.",
"type": "number",
"format": "double"
}
}
},
"ArimaFittingMetrics": {
"id": "ArimaFittingMetrics",
"description": "ARIMA model fitting metrics.",
"type": "object",
"properties": {
"logLikelihood": {
"description": "Log-likelihood.",
"type": "number",
"format": "double"
},
"aic": {
"description": "AIC.",
"type": "number",
"format": "double"
},
"variance": {
"description": "Variance.",
"type": "number",
"format": "double"
}
}
},
"EvaluationMetrics": {
"id": "EvaluationMetrics",
"description": "Evaluation metrics of a model. These are either computed on all training\ndata or just the eval data based on whether eval data was used during\ntraining. These are not present for imported models.",
"type": "object",
"properties": {
"regressionMetrics": {
"description": "Populated for regression models and explicit feedback type matrix\nfactorization models.",
"$ref": "RegressionMetrics"
},
"binaryClassificationMetrics": {
"description": "Populated for binary classification\/classifier models.",
"$ref": "BinaryClassificationMetrics"
},
"multiClassClassificationMetrics": {
"description": "Populated for multi-class classification\/classifier models.",
"$ref": "MultiClassClassificationMetrics"
},
"clusteringMetrics": {
"description": "Populated for clustering models.",
"$ref": "ClusteringMetrics"
},
"rankingMetrics": {
"description": "[Alpha] Populated for implicit feedback type matrix factorization\nmodels.",
"$ref": "RankingMetrics"
}
}
},
"RegressionMetrics": {
"id": "RegressionMetrics",
"description": "Evaluation metrics for regression and explicit feedback type matrix\nfactorization models.",
"type": "object",
"properties": {
"meanAbsoluteError": {
"description": "Mean absolute error.",
"type": "number",
"format": "double"
},
"meanSquaredError": {
"description": "Mean squared error.",
"type": "number",
"format": "double"
},
"meanSquaredLogError": {
"description": "Mean squared log error.",
"type": "number",
"format": "double"
},
"medianAbsoluteError": {
"description": "Median absolute error.",
"type": "number",
"format": "double"
},
"rSquared": {
"description": "R^2 score.",
"type": "number",
"format": "double"
}
}
},
"BinaryClassificationMetrics": {
"id": "BinaryClassificationMetrics",
"description": "Evaluation metrics for binary classification\/classifier models.",
"type": "object",
"properties": {
"aggregateClassificationMetrics": {
"description": "Aggregate classification metrics.",
"$ref": "AggregateClassificationMetrics"
},
"binaryConfusionMatrixList": {
"description": "Binary confusion matrix at multiple thresholds.",
"type": "array",
"items": {
"$ref": "BinaryConfusionMatrix"
}
},
"positiveLabel": {
"description": "Label representing the positive class.",
"type": "string"
},
"negativeLabel": {
"description": "Label representing the negative class.",
"type": "string"
}
}
},
"AggregateClassificationMetrics": {
"id": "AggregateClassificationMetrics",
"description": "Aggregate metrics for classification\/classifier models. For multi-class\nmodels, the metrics are either macro-averaged or micro-averaged. When\nmacro-averaged, the metrics are calculated for each label and then an\nunweighted average is taken of those values. When micro-averaged, the\nmetric is calculated globally by counting the total number of correctly\npredicted rows.",
"type": "object",
"properties": {
"precision": {
"description": "Precision is the fraction of actual positive predictions that had\npositive actual labels. For multiclass this is a macro-averaged\nmetric treating each class as a binary classifier.",
"type": "number",
"format": "double"
},
"recall": {
"description": "Recall is the fraction of actual positive labels that were given a\npositive prediction. For multiclass this is a macro-averaged metric.",
"type": "number",
"format": "double"
},
"accuracy": {
"description": "Accuracy is the fraction of predictions given the correct label. For\nmulticlass this is a micro-averaged metric.",
"type": "number",
"format": "double"
},
"threshold": {
"description": "Threshold at which the metrics are computed. For binary\nclassification models this is the positive class threshold.\nFor multi-class classfication models this is the confidence\nthreshold.",
"type": "number",
"format": "double"
},
"f1Score": {
"description": "The F1 score is an average of recall and precision. For multiclass\nthis is a macro-averaged metric.",
"type": "number",
"format": "double"
},
"logLoss": {
"description": "Logarithmic Loss. For multiclass this is a macro-averaged metric.",
"type": "number",
"format": "double"
},
"rocAuc": {
"description": "Area Under a ROC Curve. For multiclass this is a macro-averaged\nmetric.",
"type": "number",
"format": "double"
}
}
},
"BinaryConfusionMatrix": {
"id": "BinaryConfusionMatrix",
"description": "Confusion matrix for binary classification models.",
"type": "object",
"properties": {
"positiveClassThreshold": {
"description": "Threshold value used when computing each of the following metric.",
"type": "number",
"format": "double"
},
"truePositives": {
"description": "Number of true samples predicted as true.",
"type": "string",
"format": "int64"
},
"falsePositives": {
"description": "Number of false samples predicted as true.",
"type": "string",
"format": "int64"
},
"trueNegatives": {
"description": "Number of true samples predicted as false.",
"type": "string",
"format": "int64"
},
"falseNegatives": {
"description": "Number of false samples predicted as false.",
"type": "string",
"format": "int64"
},
"precision": {
"description": "The fraction of actual positive predictions that had positive actual\nlabels.",
"type": "number",
"format": "double"
},
"recall": {
"description": "The fraction of actual positive labels that were given a positive\nprediction.",
"type": "number",
"format": "double"
},
"f1Score": {
"description": "The equally weighted average of recall and precision.",
"type": "number",
"format": "double"
},
"accuracy": {
"description": "The fraction of predictions given the correct label.",
"type": "number",
"format": "double"
}
}
},
"MultiClassClassificationMetrics": {
"id": "MultiClassClassificationMetrics",
"description": "Evaluation metrics for multi-class classification\/classifier models.",
"type": "object",
"properties": {
"aggregateClassificationMetrics": {
"description": "Aggregate classification metrics.",
"$ref": "AggregateClassificationMetrics"
},
"confusionMatrixList": {
"description": "Confusion matrix at different thresholds.",
"type": "array",
"items": {
"$ref": "ConfusionMatrix"
}
}
}
},
"ConfusionMatrix": {
"id": "ConfusionMatrix",
"description": "Confusion matrix for multi-class classification models.",
"type": "object",
"properties": {
"confidenceThreshold": {
"description": "Confidence threshold used when computing the entries of the\nconfusion matrix.",
"type": "number",
"format": "double"
},
"rows": {
"description": "One row per actual label.",
"type": "array",
"items": {
"$ref": "Row"
}
}
}
},
"Row": {
"id": "Row",
"description": "A single row in the confusion matrix.",
"type": "object",
"properties": {
"actualLabel": {
"description": "The original label of this row.",
"type": "string"
},
"entries": {
"description": "Info describing predicted label distribution.",
"type": "array",
"items": {
"$ref": "Entry"
}
}
}
},
"Entry": {
"id": "Entry",
"description": "A single entry in the confusion matrix.",
"type": "object",
"properties": {
"predictedLabel": {
"description": "The predicted label. For confidence_threshold > 0, we will\nalso add an entry indicating the number of items under the\nconfidence threshold.",
"type": "string"
},
"itemCount": {
"description": "Number of items being predicted as this label.",
"type": "string",
"format": "int64"
}
}
},
"ClusteringMetrics": {
"id": "ClusteringMetrics",
"description": "Evaluation metrics for clustering models.",
"type": "object",
"properties": {
"daviesBouldinIndex": {
"description": "Davies-Bouldin index.",
"type": "number",
"format": "double"
},
"meanSquaredDistance": {
"description": "Mean of squared distances between each sample to its cluster centroid.",
"type": "number",
"format": "double"
},
"clusters": {
"description": "[Beta] Information for all clusters.",
"type": "array",
"items": {
"$ref": "Cluster"
}
}
}
},
"Cluster": {
"id": "Cluster",
"description": "Message containing the information about one cluster.",
"type": "object",
"properties": {
"centroidId": {
"description": "Centroid id.",
"type": "string",
"format": "int64"
},
"featureValues": {
"description": "Values of highly variant features for this cluster.",
"type": "array",
"items": {
"$ref": "FeatureValue"
}
},
"count": {
"description": "Count of training data rows that were assigned to this cluster.",
"type": "string",
"format": "int64"
}
}
},
"FeatureValue": {
"id": "FeatureValue",
"description": "Representative value of a single feature within the cluster.",
"type": "object",
"properties": {
"featureColumn": {
"description": "The feature column name.",
"type": "string"
},
"numericalValue": {
"description": "The numerical feature value. This is the centroid value for this\nfeature.",
"type": "number",
"format": "double"
},
"categoricalValue": {
"description": "The categorical feature value.",
"$ref": "CategoricalValue"
}
}
},
"CategoricalValue": {
"id": "CategoricalValue",
"description": "Representative value of a categorical feature.",
"type": "object",
"properties": {
"categoryCounts": {
"description": "Counts of all categories for the categorical feature. If there are\nmore than ten categories, we return top ten (by count) and return\none more CategoryCount with category \"_OTHER_\" and count as\naggregate counts of remaining categories.",
"type": "array",
"items": {
"$ref": "CategoryCount"
}
}
}
},
"CategoryCount": {
"id": "CategoryCount",
"description": "Represents the count of a single category within the cluster.",
"type": "object",
"properties": {
"category": {
"description": "The name of category.",
"type": "string"
},
"count": {
"description": "The count of training samples matching the category within the\ncluster.",
"type": "string",
"format": "int64"
}
}
},
"RankingMetrics": {
"id": "RankingMetrics",
"description": "Evaluation metrics used by weighted-ALS models specified by\nfeedback_type=implicit.",
"type": "object",
"properties": {
"meanAveragePrecision": {
"description": "Calculates a precision per user for all the items by ranking them and\nthen averages all the precisions across all the users.",
"type": "number",
"format": "double"
},
"meanSquaredError": {
"description": "Similar to the mean squared error computed in regression and explicit\nrecommendation models except instead of computing the rating directly,\nthe output from evaluate is computed against a preference which is 1 or 0\ndepending on if the rating exists or not.",
"type": "number",
"format": "double"
},
"normalizedDiscountedCumulativeGain": {
"description": "A metric to determine the goodness of a ranking calculated from the\npredicted confidence by comparing it to an ideal rank measured by the\noriginal ratings.",
"type": "number",
"format": "double"
},
"averageRank": {
"description": "Determines the goodness of a ranking by computing the percentile rank\nfrom the predicted confidence and dividing it by the original rank.",
"type": "number",
"format": "double"
}
}
},
"DataSplitResult": {
"id": "DataSplitResult",
"description": "Data split result. This contains references to the training and evaluation\ndata tables that were used to train the model.",
"type": "object",
"properties": {
"trainingTable": {
"description": "Table reference of the training data after split.",
"$ref": "TableReference"
},
"evaluationTable": {
"description": "Table reference of the evaluation data after split.",
"$ref": "TableReference"
}
}
},
"StandardSqlField": {
"id": "StandardSqlField",
"description": "A field or a column.",
"type": "object",
"properties": {
"name": {
"description": "Optional. The name of this field. Can be absent for struct fields.",
"type": "string"
},
"type": {
"description": "Optional. The type of this parameter. Absent if not explicitly\nspecified (e.g., CREATE FUNCTION statement can omit the return type;\nin this case the output parameter does not have this \"type\" field).",
"$ref": "StandardSqlDataType"
}
}
},
"StandardSqlDataType": {
"id": "StandardSqlDataType",
"description": "The type of a variable, e.g., a function argument.\nExamples:\nINT64: {type_kind=\"INT64\"}\nARRAY<STRING>: {type_kind=\"ARRAY\", array_element_type=\"STRING\"}\nSTRUCT<x STRING, y ARRAY<DATE>>:\n {type_kind=\"STRUCT\",\n struct_type={fields=[\n {name=\"x\", type={type_kind=\"STRING\"}},\n {name=\"y\", type={type_kind=\"ARRAY\", array_element_type=\"DATE\"}}\n ]}}",
"type": "object",
"properties": {
"typeKind": {
"description": "Required. The top level type of this field.\nCan be any standard SQL data type (e.g., \"INT64\", \"DATE\", \"ARRAY\").",
"enumDescriptions": [
"Invalid type.",
"Encoded as a string in decimal format.",
"Encoded as a boolean \"false\" or \"true\".",
"Encoded as a number, or string \"NaN\", \"Infinity\" or \"-Infinity\".",
"Encoded as a string value.",
"Encoded as a base64 string per RFC 4648, section 4.",
"Encoded as an RFC 3339 timestamp with mandatory \"Z\" time zone string:\n1985-04-12T23:20:50.52Z",
"Encoded as RFC 3339 full-date format string: 1985-04-12",
"Encoded as RFC 3339 partial-time format string: 23:20:50.52",
"Encoded as RFC 3339 full-date \"T\" partial-time: 1985-04-12T23:20:50.52",
"Encoded as WKT",
"Encoded as a decimal string.",
"Encoded as a list with types matching Type.array_type.",
"Encoded as a list with fields of type Type.struct_type[i]. List is used\nbecause a JSON object cannot have duplicate field names."
],
"type": "string",
"enum": [
"TYPE_KIND_UNSPECIFIED",
"INT64",
"BOOL",
"FLOAT64",
"STRING",
"BYTES",
"TIMESTAMP",
"DATE",
"TIME",
"DATETIME",
"GEOGRAPHY",
"NUMERIC",
"ARRAY",
"STRUCT"
]
},
"arrayElementType": {
"description": "The type of the array's elements, if type_kind = \"ARRAY\".",
"$ref": "StandardSqlDataType"
},
"structType": {
"description": "The fields of this struct, in order, if type_kind = \"STRUCT\".",
"$ref": "StandardSqlStructType"
}
}
},
"StandardSqlStructType": {
"id": "StandardSqlStructType",
"type": "object",
"properties": {
"fields": {
"type": "array",
"items": {
"$ref": "StandardSqlField"
}
}
}
},
"ListModelsResponse": {
"id": "ListModelsResponse",
"type": "object",
"properties": {
"models": {
"description": "Models in the requested dataset. Only the following fields are populated:\nmodel_reference, model_type, creation_time, last_modified_time and\nlabels.",
"type": "array",
"items": {
"$ref": "Model"
}
},
"nextPageToken": {
"description": "A token to request the next page of results.",
"type": "string"
}
}
},
"JobCancelResponse": {
"id": "JobCancelResponse",
"type": "object",
"properties": {
"kind": {
"description": "The resource type of the response.",
"type": "string"
},
"job": {
"description": "The final state of the job.",
"$ref": "Job"
}
}
},
"Job": {
"id": "Job",
"type": "object",
"properties": {
"kind": {
"description": "Output only. The type of the resource.",
"type": "string"
},
"etag": {
"description": "Output only. A hash of this resource.",
"type": "string"
},
"id": {
"description": "Output only. Opaque ID field of the job.",
"type": "string"
},
"selfLink": {
"description": "Output only. A URL that can be used to access the resource again.",
"type": "string"
},
"user_email": {
"description": "Output only. Email address of the user who ran the job.",
"type": "string"
},
"configuration": {
"description": "Required. Describes the job configuration.",
"$ref": "JobConfiguration"
},
"jobReference": {
"description": "Optional. Reference describing the unique-per-user name of the job.",
"$ref": "JobReference"
},
"statistics": {
"description": "Output only. Information about the job, including starting time and ending\ntime of the job.",
"$ref": "JobStatistics"
},
"status": {
"description": "Output only. The status of this job. Examine this value when polling an\nasynchronous job to see if the job is complete.",
"$ref": "JobStatus"
}
}
},
"JobConfiguration": {
"id": "JobConfiguration",
"type": "object",
"properties": {
"jobType": {
"description": "Output only. The type of the job. Can be QUERY, LOAD, EXTRACT, COPY or\nUNKNOWN.",
"type": "string"
},
"query": {
"description": "[Pick one] Configures a query job.",
"$ref": "JobConfigurationQuery"
},
"load": {
"description": "[Pick one] Configures a load job.",
"$ref": "JobConfigurationLoad"
},
"copy": {
"description": "[Pick one] Copies a table.",
"$ref": "JobConfigurationTableCopy"
},
"extract": {
"description": "[Pick one] Configures an extract job.",
"$ref": "JobConfigurationExtract"
},
"dryRun": {
"description": "[Optional] If set, don't actually run this job. A valid query will return\na mostly empty response with some processing statistics, while an invalid\nquery will return the same error it would if it wasn't a dry run. Behavior\nof non-query jobs is undefined.",
"type": "boolean"
},
"jobTimeoutMs": {
"description": "[Optional] Job timeout in milliseconds. If this time limit is exceeded,\nBigQuery may attempt to terminate the job.",
"type": "string",
"format": "int64"
},
"labels": {
"description": "The labels associated with this job. You can use these to organize and\ngroup your jobs.\nLabel keys and values can be no longer than 63 characters, can only contain\nlowercase letters, numeric characters, underscores and dashes.\nInternational characters are allowed. Label values are optional. Label\nkeys must start with a letter and each label in the list must have a\ndifferent key.",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
},
"JobConfigurationQuery": {
"id": "JobConfigurationQuery",
"description": "JobConfigurationQuery configures a BigQuery query job.",
"type": "object",
"properties": {
"query": {
"description": "[Required] SQL query text to execute. The useLegacySql field can be used\nto indicate whether the query uses legacy SQL or standard SQL.",
"type": "string"
},
"destinationTable": {
"description": "[Optional] Describes the table where the query results should be stored.\nThis property must be set for large results that exceed the maximum\nresponse size. For queries that produce anonymous (cached) results, this\nfield will be populated by BigQuery.",
"$ref": "TableReference"
},
"tableDefinitions": {
"type": "object",
"additionalProperties": {
"$ref": "ExternalDataConfiguration"
}
},
"userDefinedFunctionResources": {
"description": "Describes user-defined function resources used in the query.",
"type": "array",
"items": {
"$ref": "UserDefinedFunctionResource"
}
},
"createDisposition": {
"description": "[Optional] Specifies whether the job is allowed to create new tables.\nThe following values are supported:\n\n* CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the\ntable.\n* CREATE_NEVER: The table must already exist. If it does not,\na 'notFound' error is returned in the job result.\n\nThe default value is CREATE_IF_NEEDED.\nCreation, truncation and append actions occur as one atomic update\nupon job completion.",
"type": "string"
},
"writeDisposition": {
"description": "[Optional] Specifies the action that occurs if the destination table\nalready exists. The following values are supported:\n\n* WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the\ntable data and uses the schema from the query result.\n* WRITE_APPEND: If the table already exists, BigQuery appends the data to\nthe table.\n* WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'\nerror is returned in the job result.\n\nThe default value is WRITE_EMPTY. Each action is atomic and only occurs if\nBigQuery is able to complete the job successfully. Creation, truncation and\nappend actions occur as one atomic update upon job completion.",
"type": "string"
},
"defaultDataset": {
"description": "[Optional] Specifies the default dataset to use for unqualified\ntable names in the query. Note that this does not alter behavior of\nunqualified dataset names.",
"$ref": "DatasetReference"
},
"priority": {
"description": "[Optional] Specifies a priority for the query. Possible values include\nINTERACTIVE and BATCH. The default value is INTERACTIVE.",
"type": "string"
},
"preserveNulls": {
"description": "[Deprecated] This property is deprecated.",
"type": "boolean"
},
"allowLargeResults": {
"description": "[Optional] If true and query uses legacy SQL dialect, allows the query\nto produce arbitrarily large result tables at a slight cost in performance.\nRequires destinationTable to be set.\nFor standard SQL queries, this flag is ignored and large results are\nalways allowed. However, you must still set destinationTable when result\nsize exceeds the allowed maximum response size.",
"type": "boolean"
},
"useQueryCache": {
"description": "[Optional] Whether to look for the result in the query cache. The query\ncache is a best-effort cache that will be flushed whenever tables in the\nquery are modified. Moreover, the query cache is only available when a\nquery does not have a destination table specified. The default value is\ntrue.",
"default": "true",
"type": "boolean"
},
"flattenResults": {
"description": "[Optional] If true and query uses legacy SQL dialect, flattens all nested\nand repeated fields in the query results.\nallowLargeResults must be true if this is set to false.\nFor standard SQL queries, this flag is ignored and results are never\nflattened.",
"type": "boolean"
},
"maximumBillingTier": {
"description": "[Optional] Limits the billing tier for this job. Queries that have resource\nusage beyond this tier will fail (without incurring a charge).\nIf unspecified, this will be set to your project default.",
"type": "integer",
"format": "int32"
},
"maximumBytesBilled": {
"description": "[Optional] Limits the bytes billed for this job. Queries that will have\nbytes billed beyond this limit will fail (without incurring a charge).\nIf unspecified, this will be set to your project default.",
"type": "string",
"format": "int64"
},
"useLegacySql": {
"description": "Specifies whether to use BigQuery's legacy SQL dialect for this query.\nThe default value is true. If set to false, the query will use\nBigQuery's standard SQL:\nhttps:\/\/cloud.google.com\/bigquery\/sql-reference\/\n\nWhen useLegacySql is set to false, the value of flattenResults is ignored;\nquery will be run as if flattenResults is false.",
"type": "boolean"
},
"parameterMode": {
"description": "Standard SQL only. Set to POSITIONAL to use positional (?) query parameters\nor to NAMED to use named (@myparam) query parameters in this query.",
"type": "string"
},
"queryParameters": {
"description": "Query parameters for standard SQL queries.",
"type": "array",
"items": {
"$ref": "QueryParameter"
}
},
"schemaUpdateOptions": {
"description": "Allows the schema of the destination table to be updated as a side effect\nof the query job. Schema update options are supported in two cases:\nwhen writeDisposition is WRITE_APPEND;\nwhen writeDisposition is WRITE_TRUNCATE and the destination table is a\npartition of a table, specified by partition decorators. For normal tables,\nWRITE_TRUNCATE will always overwrite the schema.\nOne or more of the following values are specified:\n\n* ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.\n* ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original\nschema to nullable.",
"type": "array",
"items": {
"type": "string"
}
},
"timePartitioning": {
"description": "Time-based partitioning specification for the destination table. Only one\nof timePartitioning and rangePartitioning should be specified.",
"$ref": "TimePartitioning"
},
"rangePartitioning": {
"description": "[Experimental] Range partitioning specification for the destination table.\nOnly one of timePartitioning and rangePartitioning should be specified.",
"$ref": "RangePartitioning"
},
"clustering": {
"description": "[Experimental] Clustering specification for the destination table. Must be\nspecified with time-based partitioning, data in the table will be first\npartitioned and subsequently clustered.",
"$ref": "Clustering"
},
"destinationEncryptionConfiguration": {
"description": "Custom encryption configuration (e.g., Cloud KMS keys)",
"$ref": "EncryptionConfiguration"
},
"scriptOptions": {
"description": "Options controlling the execution of scripts.",
"$ref": "ScriptOptions"
},
"connectionProperties": {
"description": "Connection properties which can modify the query behavior.",
"type": "array",
"items": {
"$ref": "ConnectionProperty"
}
}
}
},
"ExternalDataConfiguration": {
"id": "ExternalDataConfiguration",
"type": "object",
"properties": {
"sourceUris": {
"description": "[Required] The fully-qualified URIs that point to your data in Google\nCloud. For Google Cloud Storage URIs:\n Each URI can contain one '*' wildcard character and it must come after\n the 'bucket' name.\n Size limits related to load jobs apply to external data sources.\nFor Google Cloud Bigtable URIs:\n Exactly one URI can be specified and it has be a fully specified and\n valid HTTPS URL for a Google Cloud Bigtable table.\nFor Google Cloud Datastore backups, exactly one URI can be specified. Also,\nthe '*' wildcard character is not allowed.",
"type": "array",
"items": {
"type": "string"
}
},
"schema": {
"description": "[Optional] The schema for the data.\nSchema is required for CSV and JSON formats.\nSchema is disallowed for Google Cloud Bigtable, Cloud Datastore backups,\nand Avro formats.",
"$ref": "TableSchema"
},
"sourceFormat": {
"description": "[Required] The data format.\nFor CSV files, specify \"CSV\".\nFor Google sheets, specify \"GOOGLE_SHEETS\".\nFor newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\".\nFor Avro files, specify \"AVRO\".\nFor Google Cloud Datastore backups, specify \"DATASTORE_BACKUP\".\n[Beta] For Google Cloud Bigtable, specify \"BIGTABLE\".",
"type": "string"
},
"maxBadRecords": {
"description": "[Optional] The maximum number of bad records that BigQuery can ignore when\nreading data. If the number of bad records exceeds this value, an invalid\nerror is returned in the job result. The default value is 0, which requires\nthat all records are valid. This setting is ignored for Google Cloud\nBigtable, Google Cloud Datastore backups and Avro formats.",
"type": "integer",
"format": "int32"
},
"autodetect": {
"description": "Try to detect schema and format options automatically.\nAny option specified explicitly will be honored.",
"type": "boolean"
},
"ignoreUnknownValues": {
"description": "[Optional] Indicates if BigQuery should allow extra values that are not\nrepresented in the table schema.\nIf true, the extra values are ignored.\nIf false, records with extra columns are treated as bad records, and if\nthere are too many bad records, an invalid error is returned in the job\nresult.\nThe default value is false.\nThe sourceFormat property determines what BigQuery treats as an extra\nvalue:\n CSV: Trailing columns\n JSON: Named values that don't match any column names\n Google Cloud Bigtable: This setting is ignored.\n Google Cloud Datastore backups: This setting is ignored.\n Avro: This setting is ignored.",
"type": "boolean"
},
"compression": {
"description": "[Optional] The compression type of the data source.\nPossible values include GZIP and NONE. The default value is NONE.\nThis setting is ignored for Google Cloud Bigtable, Google Cloud Datastore\nbackups and Avro formats. An empty string is an invalid value.",
"type": "string"
},
"csvOptions": {
"description": "Additional properties to set if sourceFormat is set to CSV.",
"$ref": "CsvOptions"
},
"bigtableOptions": {
"description": "[Optional] Additional options if sourceFormat is set to BIGTABLE.",
"$ref": "BigtableOptions"
},
"googleSheetsOptions": {
"description": "[Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS.",
"$ref": "GoogleSheetsOptions"
},
"hivePartitioningOptions": {
"description": "[Optional] When set, configures hive partitioning support. Not all storage\nformats support hive partitioning -- requesting hive partitioning on an\nunsupported format will lead to an error, as will providing an invalid\nspecification.",
"$ref": "HivePartitioningOptions"
},
"connectionId": {
"description": "Optional This field is in testing and should not yet be\nused.",
"type": "string"
}
}
},
"TableSchema": {
"id": "TableSchema",
"description": "Schema of a table",
"type": "object",
"properties": {
"fields": {
"description": "Describes the fields in a table.",
"type": "array",
"items": {
"$ref": "TableFieldSchema"
}
}
}
},
"TableFieldSchema": {
"id": "TableFieldSchema",
"description": "A field in TableSchema",
"type": "object",
"properties": {
"name": {
"description": "Required. The field name. The name must contain only letters (a-z, A-Z),\nnumbers (0-9), or underscores (_), and must start with a letter or\nunderscore. The maximum length is 128 characters.",
"type": "string"
},
"type": {
"description": "Required. The field data type. Possible values include STRING, BYTES,\nINTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), BOOLEAN,\nBOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD (where\nRECORD indicates that the field contains a nested schema) or STRUCT (same\nas RECORD).",
"type": "string"
},
"mode": {
"description": "Optional. The field mode. Possible values include NULLABLE, REQUIRED and\nREPEATED. The default value is NULLABLE.",
"type": "string"
},
"fields": {
"description": "Optional. Describes the nested schema fields if the type property is set\nto RECORD.",
"type": "array",
"items": {
"$ref": "TableFieldSchema"
}
},
"description": {
"description": "Optional. The field description. The maximum length is 1,024 characters.",
"type": "string"
},
"policyTags": {
"description": "Optional. The policy tags attached to this field, used for field-level access\ncontrol. If not set, defaults to empty policy_tags.",
"type": "object",
"properties": {
"names": {
"description": "A list of policy tag resource names. For example,\n\"projects\/1\/locations\/eu\/taxonomies\/2\/policyTags\/3\". At most 1 policy tag\nis currently allowed.",
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
},
"CsvOptions": {
"id": "CsvOptions",
"description": "Options for external data sources.",
"type": "object",
"properties": {
"fieldDelimiter": {
"description": "[Optional] The separator character for fields in a CSV file. The separator\nis interpreted as a single byte. For files encoded in ISO-8859-1, any\nsingle character can be used as a separator. For files encoded in UTF-8,\ncharacters represented in decimal range 1-127 (U+0001-U+007F) can be used\nwithout any modification. UTF-8 characters encoded with multiple bytes\n(i.e. U+0080 and above) will have only the first byte used for separating\nfields. The remaining bytes will be treated as a part of the field.\nBigQuery also supports the escape sequence \"\\t\" (U+0009) to specify a tab\nseparator. The default value is comma (\",\", U+002C).",
"type": "string"
},
"skipLeadingRows": {
"description": "[Optional] The number of rows at the top of a CSV file that BigQuery will\nskip when reading the data. The default value is 0. This property is\nuseful if you have header rows in the file that should be skipped.\nWhen autodetect is on, the behavior is the following:\n\n* skipLeadingRows unspecified - Autodetect tries to detect headers in the\n first row. If they are not detected, the row is read as data. Otherwise\n data is read starting from the second row.\n* skipLeadingRows is 0 - Instructs autodetect that there are no headers and\n data should be read starting from the first row.\n* skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect\n headers in row N. If headers are not detected, row N is just skipped.\n Otherwise row N is used to extract column names for the detected schema.",
"type": "string",
"format": "int64"
},
"quote": {
"description": "[Optional] The value that is used to quote data sections in a CSV file.\nBigQuery converts the string to ISO-8859-1 encoding, and then uses the\nfirst byte of the encoded string to split the data in its raw, binary\nstate.\nThe default value is a double-quote (\").\nIf your data does not contain quoted sections,\nset the property value to an empty string.\nIf your data contains quoted newline characters, you must also set the\nallowQuotedNewlines property to true.",
"pattern": ".?",
"type": "string"
},
"allowQuotedNewlines": {
"description": "[Optional] Indicates if BigQuery should allow quoted data sections that\ncontain newline characters in a CSV file. The default value is false.",
"type": "boolean"
},
"allowJaggedRows": {
"description": "[Optional] Indicates if BigQuery should accept rows that are missing\ntrailing optional columns. If true, BigQuery treats missing trailing\ncolumns as null values.\nIf false, records with missing trailing columns are treated as bad records,\nand if there are too many bad records, an invalid error is returned in the\njob result. The default value is false.",
"type": "boolean"
},
"encoding": {
"description": "[Optional] The character encoding of the data.\nThe supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.\nBigQuery decodes the data after the raw, binary data has been split using\nthe values of the quote and fieldDelimiter properties.",
"type": "string"
}
}
},
"BigtableOptions": {
"id": "BigtableOptions",
"type": "object",
"properties": {
"columnFamilies": {
"description": "[Optional] List of column families to expose in the table schema along with\ntheir types.\nThis list restricts the column families that can be referenced in queries\nand specifies their value types.\nYou can use this list to do type conversions - see the 'type' field for\nmore details.\nIf you leave this list empty, all column families are present in the table\nschema and their values are read as BYTES.\nDuring a query only the column families referenced in that query are read\nfrom Bigtable.",
"type": "array",
"items": {
"$ref": "BigtableColumnFamily"
}
},
"ignoreUnspecifiedColumnFamilies": {
"description": "[Optional] If field is true, then the column families that are not\nspecified in columnFamilies list are not exposed in the table schema.\nOtherwise, they are read with BYTES type values.\nThe default value is false.",
"type": "boolean"
},
"readRowkeyAsString": {
"description": "[Optional] If field is true, then the rowkey column families will be read\nand converted to string. Otherwise they are read with BYTES type values and\nusers need to manually cast them with CAST if necessary.\nThe default value is false.",
"type": "boolean"
}
}
},
"BigtableColumnFamily": {
"id": "BigtableColumnFamily",
"type": "object",
"properties": {
"familyId": {
"description": "Identifier of the column family.",
"type": "string"
},
"type": {
"description": "[Optional] The type to convert the value in cells of this column family.\nThe values are expected to be encoded using HBase Bytes.toBytes function\nwhen using the BINARY encoding value.\nFollowing BigQuery types are allowed (case-sensitive) -\n BYTES\n STRING\n INTEGER\n FLOAT\n BOOLEAN\nDefault type is BYTES.\nThis can be overridden for a specific column by listing that column in\n'columns' and specifying a type for it.",
"type": "string"
},
"encoding": {
"description": "[Optional] The encoding of the values when the type is not STRING.\nAcceptable encoding values are:\n TEXT - indicates values are alphanumeric text strings.\n BINARY - indicates values are encoded using HBase Bytes.toBytes family of\n functions.\nThis can be overridden for a specific column by listing that column in\n'columns' and specifying an encoding for it.",
"type": "string"
},
"columns": {
"description": "[Optional] Lists of columns that should be exposed as individual fields as\nopposed to a list of (column name, value) pairs.\nAll columns whose qualifier matches a qualifier in this list can be\naccessed as <family field name>.<column field name>.\nOther columns can be accessed as a list through <family field name>.Column\nfield.",
"type": "array",
"items": {
"$ref": "BigtableColumn"
}
},
"onlyReadLatest": {
"description": "[Optional] If this is set only the latest version of value are exposed for\nall columns in this column family.\nThis can be overridden for a specific column by listing that column in\n'columns' and specifying a different setting\nfor that column.",
"type": "boolean"
}
}
},
"BigtableColumn": {
"id": "BigtableColumn",
"type": "object",
"properties": {
"qualifierEncoded": {
"description": "[Required] Qualifier of the column.\nColumns in the parent column family that has this exact qualifier are\nexposed as <family field name>.<column field name> field.\nIf the qualifier is valid UTF-8 string, it can be specified in the\nqualifier_string field. Otherwise, a base-64 encoded value must be set to\nqualifier_encoded.\nThe column field name is the same as the column qualifier. However, if the\nqualifier is not a valid BigQuery field identifier i.e. does not match\na-zA-Z*, a valid identifier must be provided as field_name.",
"type": "string",
"format": "byte"
},
"qualifierString": {
"type": "string"
},
"fieldName": {
"description": "[Optional] If the qualifier is not a valid BigQuery field identifier i.e.\ndoes not match a-zA-Z*, a valid identifier must be provided\nas the column field name and is used as field name in queries.",
"type": "string"
},
"type": {
"description": "[Optional] The type to convert the value in cells of this column.\nThe values are expected to be encoded using HBase Bytes.toBytes function\nwhen using the BINARY encoding value.\nFollowing BigQuery types are allowed (case-sensitive) -\n BYTES\n STRING\n INTEGER\n FLOAT\n BOOLEAN\nDefault type is BYTES.\n'type' can also be set at the column family level. However, the setting at\nthis level takes precedence if 'type' is set at both levels.",
"type": "string"
},
"encoding": {
"description": "[Optional] The encoding of the values when the type is not STRING.\nAcceptable encoding values are:\n TEXT - indicates values are alphanumeric text strings.\n BINARY - indicates values are encoded using HBase Bytes.toBytes family of\n functions.\n'encoding' can also be set at the column family level. However, the setting\nat this level takes precedence if 'encoding' is set at both levels.",
"type": "string"
},
"onlyReadLatest": {
"description": "[Optional] If this is set, only the latest version of value in this column\n are exposed.\n'onlyReadLatest' can also be set at the column family level. However, the\nsetting at this level takes precedence if 'onlyReadLatest' is set at both\nlevels.",
"type": "boolean"
}
}
},
"GoogleSheetsOptions": {
"id": "GoogleSheetsOptions",
"type": "object",
"properties": {
"skipLeadingRows": {
"description": "[Optional] The number of rows at the top of a sheet that BigQuery will skip\nwhen reading the data. The default value is 0. This property is useful if\nyou have header rows that should be skipped. When autodetect is on,\nthe behavior is the following:\n* skipLeadingRows unspecified - Autodetect tries to detect headers in the\n first row. If they are not detected, the row is read as data. Otherwise\n data is read starting from the second row.\n* skipLeadingRows is 0 - Instructs autodetect that there are no headers and\n data should be read starting from the first row.\n* skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect\n headers in row N. If headers are not detected, row N is just skipped.\n Otherwise row N is used to extract column names for the detected schema.",
"type": "string",
"format": "int64"
},
"range": {
"description": "[Optional] Range of a sheet to query from. Only used when non-empty.\nTypical format: sheet_name!top_left_cell_id:bottom_right_cell_id\nFor example: sheet1!A1:B20",
"type": "string"
}
}
},
"HivePartitioningOptions": {
"id": "HivePartitioningOptions",
"description": "Options for configuring hive partitioning detect.",
"type": "object",
"properties": {
"mode": {
"description": "Optional. When set, what mode of hive partitioning to use when reading data. Two\nmodes are supported.\n* AUTO: automatically infer partition key name(s) and type(s).\n* STRINGS: automatically infer partition key name(s). All types are\nstrings.\n* CUSTOM: partition key schema is encoded in the source URI prefix.\nNot all storage formats support hive partitioning. Requesting hive\npartitioning on an unsupported format will lead to an error.\nCurrently supported formats are: JSON, CSV, ORC, Avro and Parquet.",
"type": "string"
},
"sourceUriPrefix": {
"description": "Optional. When hive partition detection is requested, a common prefix for all source\nuris must be required. The prefix must end immediately before the\npartition key encoding begins. For example, consider files following this\ndata layout:\ngs:\/\/bucket\/path_to_table\/dt=2019-06-01\/country=USA\/id=7\/file.avro\ngs:\/\/bucket\/path_to_table\/dt=2019-05-31\/country=CA\/id=3\/file.avro\nWhen hive partitioning is requested with either AUTO or STRINGS detection,\nthe common prefix can be either of gs:\/\/bucket\/path_to_table or\ngs:\/\/bucket\/path_to_table\/.\nCUSTOM detection requires encoding the partitioning schema immediately\nafter the common prefix. For CUSTOM, any of\n* gs:\/\/bucket\/path_to_table\/{dt:DATE}\/{country:STRING}\/{id:INTEGER}\n* gs:\/\/bucket\/path_to_table\/{dt:STRING}\/{country:STRING}\/{id:INTEGER}\n* gs:\/\/bucket\/path_to_table\/{dt:DATE}\/{country:STRING}\/{id:STRING}\nwould all be valid source URI prefixes.",
"type": "string"
},
"requirePartitionFilter": {
"description": "Optional. If set to true, queries over this table require a partition filter that can\nbe used for partition elimination to be specified. Note that this field\nshould only be true when creating a permanent external table or querying a\ntemporary external table.\nHive-partitioned loads with require_partition_filter explicitly set to\ntrue will fail.",
"default": "false",
"type": "boolean"
},
"fields": {
"description": "Output only. For permanent external tables, this field is populated with the hive\npartition keys in the order they were inferred. The types of the partition\nkeys can be deduced by checking the table schema (which will include the\npartition keys). Not every API will populate this field in the output. For\nexample, Tables.Get will populate it, but Tables.List will not contain this\nfield.",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"UserDefinedFunctionResource": {
"id": "UserDefinedFunctionResource",
"type": "object",
"properties": {
"resourceUri": {
"description": "[Pick one] A code resource to load from a Google Cloud Storage URI\n(gs:\/\/bucket\/path).",
"type": "string"
},
"inlineCode": {
"description": "[Pick one] An inline resource that contains code for a user-defined\nfunction (UDF). Providing a inline code resource is equivalent to providing\na URI for a file containing the same code.",
"type": "string"
}
}
},
"QueryParameter": {
"id": "QueryParameter",
"description": "A parameter given to a query.",
"type": "object",
"properties": {
"name": {
"description": "Optional. If unset, this is a positional parameter. Otherwise, should be\nunique within a query.",
"type": "string"
},
"parameterType": {
"description": "Required. The type of this parameter.",
"$ref": "QueryParameterType"
},
"parameterValue": {
"description": "Required. The value of this parameter.",
"$ref": "QueryParameterValue"
}
}
},
"QueryParameterType": {
"id": "QueryParameterType",
"description": "The type of a query parameter.",
"type": "object",
"properties": {
"type": {
"description": "Required. The top level type of this field.",
"type": "string"
},
"arrayType": {
"description": "Optional. The type of the array's elements, if this is an array.",
"$ref": "QueryParameterType"
},
"structTypes": {
"description": "Optional. The types of the fields of this struct, in order, if this is a\nstruct.",
"type": "array",
"items": {
"description": "The type of a struct parameter.",
"type": "object",
"properties": {
"name": {
"description": "Optional. The name of this field.",
"type": "string"
},
"type": {
"description": "Required. The type of this field.",
"$ref": "QueryParameterType"
},
"description": {
"description": "Optional. Human-oriented description of the field.",
"type": "string"
}
}
}
}
}
},
"QueryParameterValue": {
"id": "QueryParameterValue",
"description": "The value of a query parameter.",
"type": "object",
"properties": {
"value": {
"description": "Optional. The value of this value, if a simple scalar type.",
"type": "string"
},
"arrayValues": {
"description": "Optional. The array values, if this is an array type.",
"type": "array",
"items": {
"$ref": "QueryParameterValue"
}
},
"structValues": {
"description": "The struct field values.",
"type": "object",
"additionalProperties": {
"$ref": "QueryParameterValue"
}
}
}
},
"TimePartitioning": {
"id": "TimePartitioning",
"type": "object",
"properties": {
"type": {
"description": "Required. The supported types are DAY, which will generate one partition per day,\nand HOUR, which will generate one partition per hour.",
"type": "string"
},
"expirationMs": {
"description": "Optional. Number of milliseconds for which to keep the storage for a\npartition.\nA wrapper is used here because 0 is an invalid value.",
"type": "string",
"format": "int64"
},
"field": {
"description": "Optional. If not set, the table is partitioned by pseudo\ncolumn '_PARTITIONTIME'; if set, the table is partitioned by this field.\nThe field must be a top-level TIMESTAMP or DATE field. Its mode must be\nNULLABLE or REQUIRED.\nA wrapper is used here because an empty string is an invalid value.",
"type": "string"
},
"requirePartitionFilter": {
"description": "If set to true, queries over this table require a\npartition filter that can be used for partition elimination to be\nspecified. This field is deprecated; please set the field with the same\nname on the table itself instead.\nThis field needs a wrapper because we want to output the default value,\nfalse, if the user explicitly set it.",
"default": "false",
"type": "boolean"
}
}
},
"RangePartitioning": {
"id": "RangePartitioning",
"type": "object",
"properties": {
"field": {
"description": "Required. [Experimental] The table is partitioned by this field. The field\nmust be a top-level NULLABLE\/REQUIRED field. The only supported type is\nINTEGER\/INT64.",
"type": "string"
},
"range": {
"description": "[Experimental] Defines the ranges for range partitioning.",
"type": "object",
"properties": {
"start": {
"description": "Required. [Experimental] The start of range partitioning, inclusive.",
"type": "string"
},
"end": {
"description": "Required. [Experimental] The end of range partitioning, exclusive.",
"type": "string"
},
"interval": {
"description": "Required. [Experimental] The width of each interval.",
"type": "string"
}
}
}
}
},
"Clustering": {
"id": "Clustering",
"description": "Configures table clustering.",
"type": "object",
"properties": {
"fields": {
"description": "One or more fields on which data should be clustered. Only top-level,\nnon-repeated, simple-type fields are supported. The order of the fields\nwill determine how clusters will be generated, so it is important.",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"ScriptOptions": {
"id": "ScriptOptions",
"type": "object",
"properties": {
"statementTimeoutMs": {
"description": "Timeout period for each statement in a script.",
"type": "string",
"format": "int64"
},
"statementByteBudget": {
"description": "Limit on the number of bytes billed per statement. Exceeding this budget\nresults in an error.",
"type": "string",
"format": "int64"
},
"keyResultStatement": {
"description": "Determines which statement in the script represents the \"key result\",\nused to populate the schema and query results of the script job.\nDefault is LAST.",
"enumDescriptions": [
"",
"",
""
],
"type": "string",
"enum": [
"KEY_RESULT_STATEMENT_KIND_UNSPECIFIED",
"LAST",
"FIRST_SELECT"
]
}
}
},
"ConnectionProperty": {
"id": "ConnectionProperty",
"description": "A connection-level property to customize query behavior. Under JDBC, these\ncorrespond directly to connection properties passed to the DriverManager.\nUnder ODBC, these correspond to properties in the connection string.\n\nCurrently, the only supported connection property is \"time_zone\", whose value\nrepresents the default timezone used to run the query. Additional properties\nare allowed, but ignored. Specifying multiple connection properties with the\nsame key is an error.",
"type": "object",
"properties": {
"key": {
"description": "The key of the property to set.",
"type": "string"
},
"value": {
"description": "The value of the property to set.",
"type": "string"
}
}
},
"JobConfigurationLoad": {
"id": "JobConfigurationLoad",
"description": "JobConfigurationLoad contains the configuration properties for loading data\ninto a destination table.",
"type": "object",
"properties": {
"sourceUris": {
"description": "[Required] The fully-qualified URIs that point to your data in Google\nCloud.\nFor Google Cloud Storage URIs:\n Each URI can contain one '*' wildcard character and it must come after\n the 'bucket' name. Size limits related to load jobs apply to external\n data sources.\nFor Google Cloud Bigtable URIs:\n Exactly one URI can be specified and it has be a fully specified and\n valid HTTPS URL for a Google Cloud Bigtable table.\nFor Google Cloud Datastore backups:\n Exactly one URI can be specified. Also, the '*' wildcard character is not\n allowed.",
"type": "array",
"items": {
"type": "string"
}
},
"schema": {
"description": "[Optional] The schema for the destination table. The schema can be\nomitted if the destination table already exists, or if you're loading data\nfrom Google Cloud Datastore.",
"$ref": "TableSchema"
},
"destinationTable": {
"description": "[Required] The destination table to load the data into.",
"$ref": "TableReference"
},
"destinationTableProperties": {
"description": "Experimental Properties with which to create the destination\ntable if it is new.",
"$ref": "DestinationTableProperties"
},
"createDisposition": {
"description": "[Optional] Specifies whether the job is allowed to create new tables.\nThe following values are supported:\n\n* CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the\ntable.\n* CREATE_NEVER: The table must already exist. If it does not,\na 'notFound' error is returned in the job result.\nThe default value is CREATE_IF_NEEDED.\nCreation, truncation and append actions occur as one atomic update\nupon job completion.",
"type": "string"
},
"writeDisposition": {
"description": "[Optional] Specifies the action that occurs if the destination table\nalready exists. The following values are supported:\n\n* WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the\ntable data.\n* WRITE_APPEND: If the table already exists, BigQuery appends the data to\nthe table.\n* WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'\nerror is returned in the job result.\n\nThe default value is WRITE_APPEND.\nEach action is atomic and only occurs if BigQuery is able to complete the\njob successfully.\nCreation, truncation and append actions occur as one atomic update\nupon job completion.",
"type": "string"
},
"nullMarker": {
"description": "[Optional] Specifies a string that represents a null value in a CSV file.\nFor example, if you specify \"\\N\", BigQuery interprets \"\\N\" as a null value\nwhen loading a CSV file.\nThe default value is the empty string. If you set this property to a custom\nvalue, BigQuery throws an error if an empty string is present for all data\ntypes except for STRING and BYTE. For STRING and BYTE columns, BigQuery\ninterprets the empty string as an empty value.",
"type": "string"
},
"fieldDelimiter": {
"description": "[Optional] The separator character for fields in a CSV file. The separator\nis interpreted as a single byte. For files encoded in ISO-8859-1, any\nsingle character can be used as a separator. For files encoded in UTF-8,\ncharacters represented in decimal range 1-127 (U+0001-U+007F) can be used\nwithout any modification. UTF-8 characters encoded with multiple bytes\n(i.e. U+0080 and above) will have only the first byte used for separating\nfields. The remaining bytes will be treated as a part of the field.\nBigQuery also supports the escape sequence \"\\t\" (U+0009) to specify a tab\nseparator. The default value is comma (\",\", U+002C).",
"type": "string"
},
"skipLeadingRows": {
"description": "[Optional] The number of rows at the top of a CSV file that BigQuery will\nskip when loading the data. The default value is 0. This property is useful\nif you have header rows in the file that should be skipped. When autodetect\nis on, the behavior is the following:\n\n* skipLeadingRows unspecified - Autodetect tries to detect headers in the\n first row. If they are not detected, the row is read as data. Otherwise\n data is read starting from the second row.\n* skipLeadingRows is 0 - Instructs autodetect that there are no headers and\n data should be read starting from the first row.\n* skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect\n headers in row N. If headers are not detected, row N is just skipped.\n Otherwise row N is used to extract column names for the detected schema.",
"type": "integer",
"format": "int32"
},
"encoding": {
"description": "[Optional] The character encoding of the data.\nThe supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.\nBigQuery decodes the data after the raw, binary data has been split using\nthe values of the quote and fieldDelimiter properties.",
"type": "string"
},
"quote": {
"description": "[Optional] The value that is used to quote data sections in a CSV file.\nBigQuery converts the string to ISO-8859-1 encoding, and then uses the\nfirst byte of the encoded string to split the data in its raw, binary\nstate.\nThe default value is a double-quote ('\"').\nIf your data does not contain quoted sections, set the property value to an\nempty string.\nIf your data contains quoted newline characters, you must also set the\nallowQuotedNewlines property to true.\n@default \"",
"pattern": ".?",
"type": "string"
},
"maxBadRecords": {
"description": "[Optional] The maximum number of bad records that BigQuery can ignore when\nrunning the job. If the number of bad records exceeds this value, an\ninvalid error is returned in the job result.\nThe default value is 0, which requires that all records are valid.",
"type": "integer",
"format": "int32"
},
"schemaInlineFormat": {
"description": "[Deprecated] The format of the schemaInline property.",
"type": "string"
},
"schemaInline": {
"description": "[Deprecated] The inline schema. For CSV schemas, specify as\n\"Field1:Type1[,Field2:Type2]*\". For example,\n\"foo:STRING, bar:INTEGER, baz:FLOAT\".",
"type": "string"
},
"allowQuotedNewlines": {
"description": "Indicates if BigQuery should allow quoted data sections that contain\nnewline characters in a CSV file. The default value is false.",
"type": "boolean"
},
"sourceFormat": {
"description": "[Optional] The format of the data files.\nFor CSV files, specify \"CSV\". For datastore backups,\nspecify \"DATASTORE_BACKUP\". For newline-delimited JSON,\nspecify \"NEWLINE_DELIMITED_JSON\". For Avro, specify \"AVRO\".\nFor parquet, specify \"PARQUET\". For orc, specify \"ORC\".\nThe default value is CSV.",
"type": "string"
},
"allowJaggedRows": {
"description": "[Optional] Accept rows that are missing trailing optional columns.\nThe missing values are treated as nulls.\nIf false, records with missing trailing columns are treated as bad records,\nand if there are too many bad records, an invalid error is returned in the\njob result.\nThe default value is false.\nOnly applicable to CSV, ignored for other formats.",
"type": "boolean"
},
"ignoreUnknownValues": {
"description": "[Optional] Indicates if BigQuery should allow extra values that are not\nrepresented in the table schema.\nIf true, the extra values are ignored.\nIf false, records with extra columns are treated as bad records, and if\nthere are too many bad records, an invalid error is returned in the job\nresult. The default value is false.\nThe sourceFormat property determines what BigQuery treats as an extra\nvalue:\n CSV: Trailing columns\n JSON: Named values that don't match any column names",
"type": "boolean"
},
"projectionFields": {
"description": "If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity\nproperties to load into BigQuery from a Cloud Datastore backup. Property\nnames are case sensitive and must be top-level properties. If no properties\nare specified, BigQuery loads all properties. If any named property isn't\nfound in the Cloud Datastore backup, an invalid error is returned in the\njob result.",
"type": "array",
"items": {
"type": "string"
}
},
"autodetect": {
"description": "[Optional] Indicates if we should automatically infer the options and\nschema for CSV and JSON sources.",
"type": "boolean"
},
"schemaUpdateOptions": {
"description": "Allows the schema of the destination table to be updated as a side effect\nof the load job if a schema is autodetected or supplied in the job\nconfiguration.\nSchema update options are supported in two cases:\nwhen writeDisposition is WRITE_APPEND;\nwhen writeDisposition is WRITE_TRUNCATE and the destination table is a\npartition of a table, specified by partition decorators. For normal tables,\nWRITE_TRUNCATE will always overwrite the schema.\nOne or more of the following values are specified:\n\n* ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.\n* ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original\nschema to nullable.",
"type": "array",
"items": {
"type": "string"
}
},
"timePartitioning": {
"description": "Time-based partitioning specification for the destination table. Only one\nof timePartitioning and rangePartitioning should be specified.",
"$ref": "TimePartitioning"
},
"rangePartitioning": {
"description": "[Experimental] Range partitioning specification for the destination table.\nOnly one of timePartitioning and rangePartitioning should be specified.",
"$ref": "RangePartitioning"
},
"clustering": {
"description": "[Experimental] Clustering specification for the destination table. Must be\nspecified with time-based partitioning, data in the table will be first\npartitioned and subsequently clustered.",
"$ref": "Clustering"
},
"destinationEncryptionConfiguration": {
"description": "Custom encryption configuration (e.g., Cloud KMS keys)",
"$ref": "EncryptionConfiguration"
},
"useAvroLogicalTypes": {
"description": "[Optional] If sourceFormat is set to \"AVRO\", indicates whether to enable\ninterpreting logical types into their corresponding types (ie. TIMESTAMP),\ninstead of only using their raw types (ie. INTEGER). The default value will\nbe true once this feature launches, but can be set now in preparation.",
"type": "boolean"
},
"hivePartitioningOptions": {
"description": "[Optional] When set, configures hive partitioning support.\nNot all storage formats support hive partitioning -- requesting hive\npartitioning on an unsupported format will lead to an error, as will\nproviding an invalid specification.",
"$ref": "HivePartitioningOptions"
}
}
},
"DestinationTableProperties": {
"id": "DestinationTableProperties",
"type": "object",
"properties": {
"friendlyName": {
"description": "[Optional] Friendly name for the destination table. If the table already\nexists, it should be same as the existing friendly name.",
"type": "string"
},
"description": {
"description": "[Optional] The description for the destination table.\nThis will only be used if the destination table is newly created.\nIf the table already exists and a value different than the current\ndescription is provided, the job will fail.",
"type": "string"
},
"labels": {
"description": "[Optional] The labels associated with this table. You can use these to\norganize and group your tables. This will only be used if the destination\ntable is newly created. If the table already exists and labels are\ndifferent than the current labels are provided, the job will fail.",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
},
"JobConfigurationTableCopy": {
"id": "JobConfigurationTableCopy",
"description": "JobConfigurationTableCopy configures a job that copies data from one table\nto another.",
"type": "object",
"properties": {
"sourceTable": {
"description": "[Pick one] Source table to copy.",
"$ref": "TableReference"
},
"sourceTables": {
"description": "[Pick one] Source tables to copy.",
"type": "array",
"items": {
"$ref": "TableReference"
}
},
"destinationTable": {
"description": "[Required] The destination table.",
"$ref": "TableReference"
},
"createDisposition": {
"description": "[Optional] Specifies whether the job is allowed to create new tables.\nThe following values are supported:\n\n* CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the\ntable.\n* CREATE_NEVER: The table must already exist. If it does not,\na 'notFound' error is returned in the job result.\n\nThe default value is CREATE_IF_NEEDED.\nCreation, truncation and append actions occur as one atomic update\nupon job completion.",
"type": "string"
},
"writeDisposition": {
"description": "[Optional] Specifies the action that occurs if the destination table\nalready exists. The following values are supported:\n\n* WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the\ntable data.\n* WRITE_APPEND: If the table already exists, BigQuery appends the data to\nthe table.\n* WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'\nerror is returned in the job result.\n\nThe default value is WRITE_EMPTY. Each action is atomic and only occurs if\nBigQuery is able to complete the job successfully. Creation, truncation and\nappend actions occur as one atomic update upon job completion.",
"type": "string"
},
"destinationEncryptionConfiguration": {
"description": "Custom encryption configuration (e.g., Cloud KMS keys).",
"$ref": "EncryptionConfiguration"
}
}
},
"JobConfigurationExtract": {
"id": "JobConfigurationExtract",
"description": "JobConfigurationExtract configures a job that exports data from a BigQuery\ntable into Google Cloud Storage.",
"type": "object",
"properties": {
"sourceTable": {
"description": "A reference to the table being exported.",
"$ref": "TableReference"
},
"sourceModel": {
"description": "A reference to the model being exported.",
"$ref": "ModelReference"
},
"destinationUri": {
"description": "[Pick one] DEPRECATED: Use destinationUris instead, passing only one URI\nas necessary.\nThe fully-qualified Google Cloud Storage URI where the\nextracted table should be written.",
"type": "string"
},
"destinationUris": {
"description": "[Pick one] A list of fully-qualified Google Cloud Storage URIs where the\nextracted table should be written.",
"type": "array",
"items": {
"type": "string"
}
},
"printHeader": {
"description": "[Optional] Whether to print out a header row in the results.\nDefault is true. Not applicable when extracting models.",
"default": "true",
"type": "boolean"
},
"fieldDelimiter": {
"description": "[Optional] When extracting data in CSV format, this defines the\ndelimiter to use between fields in the exported data.\nDefault is ','. Not applicable when extracting models.",
"type": "string"
},
"destinationFormat": {
"description": "[Optional] The exported file format. Possible values include CSV,\nNEWLINE_DELIMITED_JSON or AVRO for tables and ML_TF_SAVED_MODEL or\nML_XGBOOST_BOOSTER for models. The default value for tables is CSV. Tables\nwith nested or repeated fields cannot be exported as CSV. The default value\nfor models is ML_TF_SAVED_MODEL.",
"type": "string"
},
"compression": {
"description": "[Optional] The compression type to use for exported files. Possible values\ninclude GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE.\nDEFLATE and SNAPPY are only supported for Avro. Not applicable when\nextracting models.",
"type": "string"
},
"useAvroLogicalTypes": {
"description": "Whether to use logical types when extracting to AVRO format. Not applicable\nwhen extracting models.",
"type": "boolean"
}
}
},
"JobReference": {
"id": "JobReference",
"description": "A job reference is a fully qualified identifier for refering to a job.",
"type": "object",
"properties": {
"projectId": {
"description": "Required. The ID of the project containing this job.",
"type": "string"
},
"jobId": {
"description": "Required. The ID of the job. The ID must contain only letters (a-z, A-Z),\nnumbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024\ncharacters.",
"type": "string"
},
"location": {
"description": "Optional. The geographic location of the job. The default value is US.",
"type": "string"
}
}
},
"JobStatistics": {
"id": "JobStatistics",
"description": "Statistics for a single job execution.",
"type": "object",
"properties": {
"creationTime": {
"description": "Output only. Creation time of this job, in milliseconds since the epoch.\nThis field will be present on all jobs.",
"type": "string",
"format": "int64"
},
"startTime": {
"description": "Output only. Start time of this job, in milliseconds since the epoch.\nThis field will be present when the job transitions from the PENDING state\nto either RUNNING or DONE.",
"type": "string",
"format": "int64"
},
"endTime": {
"description": "Output only. End time of this job, in milliseconds since the epoch. This\nfield will be present whenever a job is in the DONE state.",
"type": "string",
"format": "int64"
},
"totalBytesProcessed": {
"description": "Output only. Total bytes processed for the job.",
"type": "string",
"format": "int64"
},
"completionRatio": {
"description": "Output only. [TrustedTester] Job progress (0.0 -> 1.0) for LOAD and\nEXTRACT jobs.",
"type": "number",
"format": "double"
},
"quotaDeferments": {
"description": "Output only. Ouput only. Quotas which delayed this job's start time.",
"type": "array",
"items": {
"type": "string"
}
},
"query": {
"description": "Output only. Statistics for a query job.",
"$ref": "JobStatistics2"
},
"load": {
"description": "Output only. Statistics for a load job.",
"$ref": "JobStatistics3"
},
"extract": {
"description": "Output only. Statistics for an extract job.",
"$ref": "JobStatistics4"
},
"totalSlotMs": {
"description": "Output only. Slot-milliseconds for the job.",
"type": "string",
"format": "int64"
},
"reservationUsage": {
"description": "Output only. Job resource usage breakdown by reservation.",
"type": "array",
"items": {
"description": "Job resource usage breakdown by reservation.",
"type": "object",
"properties": {
"name": {
"description": "Reservation name or \"unreserved\" for on-demand resources usage.",
"type": "string"
},
"slotMs": {
"description": "Total slot milliseconds used by the reservation for a particular job.",
"type": "string",
"format": "int64"
}
}
}
},
"reservation_id": {
"description": "Output only. Name of the primary reservation assigned to this job. Note\nthat this could be different than reservations reported in the reservation\nusage field if parent reservations were used to execute this job.",
"type": "string"
},
"numChildJobs": {
"description": "Output only. Number of child jobs executed.",
"type": "string",
"format": "int64"
},
"parentJobId": {
"description": "Output only. If this is a child job, specifies the job ID of the parent.",
"type": "string"
},
"scriptStatistics": {
"description": "Output only. If this a child job of a script, specifies information about the context\nof this job within the script.",
"$ref": "ScriptStatistics"
}
}
},
"JobStatistics2": {
"id": "JobStatistics2",
"description": "Statistics for a query job.",
"type": "object",
"properties": {
"queryPlan": {
"description": "Output only. Describes execution plan for the query.",
"type": "array",
"items": {
"$ref": "ExplainQueryStage"
}
},
"estimatedBytesProcessed": {
"description": "Output only. The original estimate of bytes processed for the job.",
"type": "string",
"format": "int64"
},
"timeline": {
"description": "Output only. [Beta] Describes a timeline of job execution.",
"type": "array",
"items": {
"$ref": "QueryTimelineSample"
}
},
"totalPartitionsProcessed": {
"description": "Output only. Total number of partitions processed from all partitioned\ntables referenced in the job.",
"type": "string",
"format": "int64"
},
"totalBytesProcessed": {
"description": "Output only. Total bytes processed for the job.",
"type": "string",
"format": "int64"
},
"totalBytesProcessedAccuracy": {
"description": "Output only. For dry-run jobs, totalBytesProcessed is an estimate and this\nfield specifies the accuracy of the estimate. Possible values can be:\nUNKNOWN: accuracy of the estimate is unknown.\nPRECISE: estimate is precise.\nLOWER_BOUND: estimate is lower bound of what the query would cost.\nUPPER_BOUND: estimate is upper bound of what the query would cost.",
"type": "string"
},
"totalBytesBilled": {
"description": "Output only. Total bytes billed for the job.",
"type": "string",
"format": "int64"
},
"billingTier": {
"description": "Output only. Billing tier for the job.",
"type": "integer",
"format": "int32"
},
"totalSlotMs": {
"description": "Output only. Slot-milliseconds for the job.",
"type": "string",
"format": "int64"
},
"reservationUsage": {
"description": "Output only. Job resource usage breakdown by reservation.",
"type": "array",
"items": {
"description": "Job resource usage breakdown by reservation.",
"type": "object",
"properties": {
"name": {
"description": "Reservation name or \"unreserved\" for on-demand resources usage.",
"type": "string"
},
"slotMs": {
"description": "Total slot milliseconds used by the reservation for a particular job.",
"type": "string",
"format": "int64"
}
}
}
},
"cacheHit": {
"description": "Output only. Whether the query result was fetched from the query cache.",
"type": "boolean"
},
"referencedTables": {
"description": "Output only. Referenced tables for the job. Queries that reference more\nthan 50 tables will not have a complete list.",
"type": "array",
"items": {
"$ref": "TableReference"
}
},
"referencedRoutines": {
"description": "Output only. Referenced routines for the job.",
"type": "array",
"items": {
"$ref": "RoutineReference"
}
},
"schema": {
"description": "Output only. The schema of the results. Present only for successful dry\nrun of non-legacy SQL queries.",
"$ref": "TableSchema"
},
"numDmlAffectedRows": {
"description": "Output only. The number of rows affected by a DML statement. Present\nonly for DML statements INSERT, UPDATE or DELETE.",
"type": "string",
"format": "int64"
},
"undeclaredQueryParameters": {
"description": "Output only. Standard SQL only: list of undeclared query\nparameters detected during a dry run validation.",
"type": "array",
"items": {
"$ref": "QueryParameter"
}
},
"statementType": {
"description": "Output only. The type of query statement, if valid.\nPossible values (new values might be added in the future):\n\"SELECT\": SELECT query.\n\"INSERT\": INSERT query; see\nhttps:\/\/cloud.google.com\/bigquery\/docs\/reference\/standard-sql\/data-manipulation-language.\n\"UPDATE\": UPDATE query; see\nhttps:\/\/cloud.google.com\/bigquery\/docs\/reference\/standard-sql\/data-manipulation-language.\n\"DELETE\": DELETE query; see\nhttps:\/\/cloud.google.com\/bigquery\/docs\/reference\/standard-sql\/data-manipulation-language.\n\"MERGE\": MERGE query; see\nhttps:\/\/cloud.google.com\/bigquery\/docs\/reference\/standard-sql\/data-manipulation-language.\n\"ALTER_TABLE\": ALTER TABLE query.\n\"ALTER_VIEW\": ALTER VIEW query.\n\"ASSERT\": ASSERT condition AS 'description'.\n\"CREATE_FUNCTION\": CREATE FUNCTION query.\n\"CREATE_MODEL\": CREATE [OR REPLACE] MODEL ... AS SELECT ... .\n\"CREATE_PROCEDURE\": CREATE PROCEDURE query.\n\"CREATE_TABLE\": CREATE [OR REPLACE] TABLE without AS SELECT.\n\"CREATE_TABLE_AS_SELECT\": CREATE [OR REPLACE] TABLE ... AS SELECT ... .\n\"CREATE_VIEW\": CREATE [OR REPLACE] VIEW ... AS SELECT ... .\n\"DROP_FUNCTION\" : DROP FUNCTION query.\n\"DROP_PROCEDURE\": DROP PROCEDURE query.\n\"DROP_TABLE\": DROP TABLE query.\n\"DROP_VIEW\": DROP VIEW query.",
"type": "string"
},
"ddlOperationPerformed": {
"description": "Output only. The DDL operation performed, possibly\ndependent on the pre-existence of the DDL target.",
"type": "string"
},
"ddlTargetTable": {
"description": "Output only. The DDL target table. Present only for\nCREATE\/DROP TABLE\/VIEW and DROP ALL ROW ACCESS POLICIES queries.",
"$ref": "TableReference"
},
"ddlTargetRoutine": {
"description": "Output only. [Beta] The DDL target routine. Present only for\nCREATE\/DROP FUNCTION\/PROCEDURE queries.",
"$ref": "RoutineReference"
},
"externalServiceCosts": {
"description": "Output only. Job cost breakdown as bigquery internal cost and external service costs.",
"type": "array",
"items": {
"$ref": "ExternalServiceCost"
}
}
}
},
"ExplainQueryStage": {
"id": "ExplainQueryStage",
"description": "A single stage of query execution.",
"type": "object",
"properties": {
"name": {
"description": "Human-readable name for the stage.",
"type": "string"
},
"id": {
"description": "Unique ID for the stage within the plan.",
"type": "string",
"format": "int64"
},
"startMs": {
"description": "Stage start time represented as milliseconds since the epoch.",
"type": "string",
"format": "int64"
},
"endMs": {
"description": "Stage end time represented as milliseconds since the epoch.",
"type": "string",
"format": "int64"
},
"inputStages": {
"description": "IDs for stages that are inputs to this stage.",
"type": "array",
"items": {
"type": "string",
"format": "int64"
}
},
"waitRatioAvg": {
"description": "Relative amount of time the average shard spent waiting to be\nscheduled.",
"type": "number",
"format": "double"
},
"waitMsAvg": {
"description": "Milliseconds the average shard spent waiting to be scheduled.",
"type": "string",
"format": "int64"
},
"waitRatioMax": {
"description": "Relative amount of time the slowest shard spent waiting to be\nscheduled.",
"type": "number",
"format": "double"
},
"waitMsMax": {
"description": "Milliseconds the slowest shard spent waiting to be scheduled.",
"type": "string",
"format": "int64"
},
"readRatioAvg": {
"description": "Relative amount of time the average shard spent reading input.",
"type": "number",
"format": "double"
},
"readMsAvg": {
"description": "Milliseconds the average shard spent reading input.",
"type": "string",
"format": "int64"
},
"readRatioMax": {
"description": "Relative amount of time the slowest shard spent reading input.",
"type": "number",
"format": "double"
},
"readMsMax": {
"description": "Milliseconds the slowest shard spent reading input.",
"type": "string",
"format": "int64"
},
"computeRatioAvg": {
"description": "Relative amount of time the average shard spent on CPU-bound tasks.",
"type": "number",
"format": "double"
},
"computeMsAvg": {
"description": "Milliseconds the average shard spent on CPU-bound tasks.",
"type": "string",
"format": "int64"
},
"computeRatioMax": {
"description": "Relative amount of time the slowest shard spent on CPU-bound tasks.",
"type": "number",
"format": "double"
},
"computeMsMax": {
"description": "Milliseconds the slowest shard spent on CPU-bound tasks.",
"type": "string",
"format": "int64"
},
"writeRatioAvg": {
"description": "Relative amount of time the average shard spent on writing output.",
"type": "number",
"format": "double"
},
"writeMsAvg": {
"description": "Milliseconds the average shard spent on writing output.",
"type": "string",
"format": "int64"
},
"writeRatioMax": {
"description": "Relative amount of time the slowest shard spent on writing output.",
"type": "number",
"format": "double"
},
"writeMsMax": {
"description": "Milliseconds the slowest shard spent on writing output.",
"type": "string",
"format": "int64"
},
"shuffleOutputBytes": {
"description": "Total number of bytes written to shuffle.",
"type": "string",
"format": "int64"
},
"shuffleOutputBytesSpilled": {
"description": "Total number of bytes written to shuffle and spilled to disk.",
"type": "string",
"format": "int64"
},
"recordsRead": {
"description": "Number of records read into the stage.",
"type": "string",
"format": "int64"
},
"recordsWritten": {
"description": "Number of records written by the stage.",
"type": "string",
"format": "int64"
},
"parallelInputs": {
"description": "Number of parallel input segments to be processed",
"type": "string",
"format": "int64"
},
"completedParallelInputs": {
"description": "Number of parallel input segments completed.",
"type": "string",
"format": "int64"
},
"status": {
"description": "Current status for this stage.",
"type": "string"
},
"steps": {
"description": "List of operations within the stage in dependency order (approximately\nchronological).",
"type": "array",
"items": {
"$ref": "ExplainQueryStep"
}
},
"slotMs": {
"description": "Slot-milliseconds used by the stage.",
"type": "string",
"format": "int64"
}
}
},
"ExplainQueryStep": {
"id": "ExplainQueryStep",
"description": "An operation within a stage.",
"type": "object",
"properties": {
"kind": {
"description": "Machine-readable operation type.",
"type": "string"
},
"substeps": {
"description": "Human-readable description of the step(s).",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"QueryTimelineSample": {
"id": "QueryTimelineSample",
"description": "Summary of the state of query execution at a given time.",
"type": "object",
"properties": {
"elapsedMs": {
"description": "Milliseconds elapsed since the start of query execution.",
"type": "string",
"format": "int64"
},
"totalSlotMs": {
"description": "Cumulative slot-ms consumed by the query.",
"type": "string",
"format": "int64"
},
"pendingUnits": {
"description": "Total parallel units of work remaining for the active stages.",
"type": "string",
"format": "int64"
},
"completedUnits": {
"description": "Total parallel units of work completed by this query.",
"type": "string",
"format": "int64"
},
"activeUnits": {
"description": "Total number of active workers. This does not correspond directly to\nslot usage. This is the largest value observed since the last sample.",
"type": "string",
"format": "int64"
}
}
},
"RoutineReference": {
"id": "RoutineReference",
"description": "Id path of a routine.",
"type": "object",
"properties": {
"projectId": {
"description": "Required. The ID of the project containing this routine.",
"type": "string"
},
"datasetId": {
"description": "Required. The ID of the dataset containing this routine.",
"type": "string"
},
"routineId": {
"description": "Required. The ID of the routine. The ID must contain only\nletters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum\nlength is 256 characters.",
"type": "string"
}
}
},
"ExternalServiceCost": {
"id": "ExternalServiceCost",
"description": "The external service cost is a portion of the total cost, these costs are not\nadditive with total_bytes_billed. Moreover, this field only track external\nservice costs that will show up as BigQuery costs (e.g. training BigQuery\nML job with google cloud CAIP or Automl Tables services), not other costs\nwhich may be accrued by running the query (e.g. reading from Bigtable or\nCloud Storage). The external service costs with different billing sku (e.g.\nCAIP job is charged based on VM usage) are converted to BigQuery\nbilled_bytes and slot_ms with equivalent amount of US dollars. Services may\nnot directly correlate to these metrics, but these are the equivalents for\nbilling purposes.\nOutput only.",
"type": "object",
"properties": {
"externalService": {
"description": "External service name.",
"type": "string"
},
"bytesProcessed": {
"description": "External service cost in terms of bigquery bytes processed.",
"type": "string",
"format": "int64"
},
"bytesBilled": {
"description": "External service cost in terms of bigquery bytes billed.",
"type": "string",
"format": "int64"
},
"slotMs": {
"description": "External service cost in terms of bigquery slot milliseconds.",
"type": "string",
"format": "int64"
}
}
},
"JobStatistics3": {
"id": "JobStatistics3",
"description": "Statistics for a load job.",
"type": "object",
"properties": {
"inputFiles": {
"description": "Output only. Number of source files in a load job.",
"type": "string",
"format": "int64"
},
"inputFileBytes": {
"description": "Output only. Number of bytes of source data in a load job.",
"type": "string",
"format": "int64"
},
"outputRows": {
"description": "Output only. Number of rows imported in a load job.\nNote that while an import job is in the running state, this\nvalue may change.",
"type": "string",
"format": "int64"
},
"outputBytes": {
"description": "Output only. Size of the loaded data in bytes. Note\nthat while a load job is in the running state, this value may change.",
"type": "string",
"format": "int64"
},
"badRecords": {
"description": "Output only. The number of bad records encountered. Note that if the job\nhas failed because of more bad records encountered than the maximum\nallowed in the load job configuration, then this number can be less than\nthe total number of bad records present in the input data.",
"type": "string",
"format": "int64"
}
}
},
"JobStatistics4": {
"id": "JobStatistics4",
"description": "Statistics for an extract job.",
"type": "object",
"properties": {
"destinationUriFileCounts": {
"description": "Output only. Number of files per destination URI or URI pattern\nspecified in the extract configuration. These values will be in the same\norder as the URIs specified in the 'destinationUris' field.",
"type": "array",
"items": {
"type": "string",
"format": "int64"
}
},
"inputBytes": {
"description": "Output only. Number of user bytes extracted into the result. This is the\nbyte count as computed by BigQuery for billing purposes\nand doesn't have any relationship with the number of actual\nresult bytes extracted in the desired format.",
"type": "string",
"format": "int64"
}
}
},
"ScriptStatistics": {
"id": "ScriptStatistics",
"description": "Job statistics specific to the child job of a script.",
"type": "object",
"properties": {
"evaluationKind": {
"description": "Whether this child job was a statement or expression.",
"enumDescriptions": [
"",
"The statement appears directly in the script.",
"The statement evaluates an expression that appears in the script."
],
"type": "string",
"enum": [
"EVALUATION_KIND_UNSPECIFIED",
"STATEMENT",
"EXPRESSION"
]
},
"stackFrames": {
"description": "Stack trace showing the line\/column\/procedure name of each frame on the\nstack at the point where the current evaluation happened. The leaf frame\nis first, the primary script is last. Never empty.",
"type": "array",
"items": {
"$ref": "ScriptStackFrame"
}
}
}
},
"ScriptStackFrame": {
"id": "ScriptStackFrame",
"description": "Represents the location of the statement\/expression being evaluated.\nLine and column numbers are defined as follows:\n\n- Line and column numbers start with one. That is, line 1 column 1 denotes\n the start of the script.\n- When inside a stored procedure, all line\/column numbers are relative\n to the procedure body, not the script in which the procedure was defined.\n- Start\/end positions exclude leading\/trailing comments and whitespace.\n The end position always ends with a \";\", when present.\n- Multi-byte Unicode characters are treated as just one column.\n- If the original script (or procedure definition) contains TAB characters,\n a tab \"snaps\" the indentation forward to the nearest multiple of 8\n characters, plus 1. For example, a TAB on column 1, 2, 3, 4, 5, 6 , or 8\n will advance the next character to column 9. A TAB on column 9, 10, 11,\n 12, 13, 14, 15, or 16 will advance the next character to column 17.",
"type": "object",
"properties": {
"startLine": {
"description": "Output only. One-based start line.",
"type": "integer",
"format": "int32"
},
"startColumn": {
"description": "Output only. One-based start column.",
"type": "integer",
"format": "int32"
},
"endLine": {
"description": "Output only. One-based end line.",
"type": "integer",
"format": "int32"
},
"endColumn": {
"description": "Output only. One-based end column.",
"type": "integer",
"format": "int32"
},
"procedureId": {
"description": "Output only. Name of the active procedure, empty if in a top-level\nscript.",
"type": "string"
},
"text": {
"description": "Output only. Text of the current statement\/expression.",
"type": "string"
}
}
},
"JobStatus": {
"id": "JobStatus",
"type": "object",
"properties": {
"errorResult": {
"description": "Output only. Final error result of the job. If present, indicates that the\njob has completed and was unsuccessful.",
"$ref": "ErrorProto"
},
"errors": {
"description": "Output only. The first errors encountered during the running of the job.\nThe final message includes the number of errors that caused the process to\nstop. Errors here do not necessarily mean that the job has not completed or\nwas unsuccessful.",
"type": "array",
"items": {
"$ref": "ErrorProto"
}
},
"state": {
"description": "Output only. Running state of the job. Valid states include 'PENDING',\n'RUNNING', and 'DONE'.",
"type": "string"
}
}
},
"ErrorProto": {
"id": "ErrorProto",
"description": "Error details.",
"type": "object",
"properties": {
"reason": {
"description": "A short error code that summarizes the error.",
"type": "string"
},
"location": {
"description": "Specifies where the error occurred, if present.",
"type": "string"
},
"debugInfo": {
"description": "Debugging information. This property is internal to Google and should not\nbe used.",
"type": "string"
},
"message": {
"description": "A human-readable description of the error.",
"type": "string"
}
}
},
"JobList": {
"id": "JobList",
"type": "object",
"properties": {
"etag": {
"description": "A hash of this page of results.",
"type": "string"
},
"kind": {
"description": "The resource type of the response.",
"type": "string"
},
"nextPageToken": {
"description": "A token to request the next page of results.",
"type": "string"
},
"jobs": {
"description": "List of jobs that were requested.",
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"description": "Unique opaque ID of the job.",
"type": "string"
},
"kind": {
"description": "The resource type.",
"type": "string"
},
"jobReference": {
"description": "Unique opaque ID of the job.",
"$ref": "JobReference"
},
"state": {
"description": "Running state of the job. When the state is DONE, errorResult can be\nchecked to determine whether the job succeeded or failed.",
"type": "string"
},
"errorResult": {
"description": "A result object that will be present only if the job has failed.",
"$ref": "ErrorProto"
},
"statistics": {
"description": "Output only. Information about the job, including starting time and ending\ntime of the job.",
"$ref": "JobStatistics"
},
"configuration": {
"description": "Required. Describes the job configuration.",
"$ref": "JobConfiguration"
},
"status": {
"description": "[Full-projection-only] Describes the status of this job.",
"$ref": "JobStatus"
},
"user_email": {
"description": "[Full-projection-only] Email address of the user who ran the job.",
"type": "string"
}
}
}
}
}
},
"GetQueryResultsResponse": {
"id": "GetQueryResultsResponse",
"description": "Response object of GetQueryResults.",
"type": "object",
"properties": {
"kind": {
"description": "The resource type of the response.",
"type": "string"
},
"etag": {
"description": "A hash of this response.",
"type": "string"
},
"schema": {
"description": "The schema of the results. Present only when the query completes\nsuccessfully.",
"$ref": "TableSchema"
},
"jobReference": {
"description": "Reference to the BigQuery Job that was created to run the query. This field\nwill be present even if the original request timed out, in which case\nGetQueryResults can be used to read the results once the query has\ncompleted. Since this API only returns the first page of results,\nsubsequent pages can be fetched via the same mechanism (GetQueryResults).",
"$ref": "JobReference"
},
"totalRows": {
"description": "The total number of rows in the complete query result set, which can be\nmore than the number of rows in this single page of results. Present only\nwhen the query completes successfully.",
"type": "string",
"format": "uint64"
},
"pageToken": {
"description": "A token used for paging results. When this token is non-empty, it\nindicates additional results are available.",
"type": "string"
},
"rows": {
"description": "An object with as many results as can be contained within the maximum\npermitted reply size. To get any additional rows, you can call\nGetQueryResults and specify the jobReference returned above. Present only\nwhen the query completes successfully.\n\nThe REST-based representation of this data leverages a series of\nJSON f,v objects for indicating fields and values.",
"type": "array",
"items": {
"$ref": "TableRow"
}
},
"totalBytesProcessed": {
"description": "The total number of bytes processed for this query.",
"type": "string",
"format": "int64"
},
"jobComplete": {
"description": "Whether the query has completed or not. If rows or totalRows are present,\nthis will always be true. If this is false, totalRows will not be\navailable.",
"type": "boolean"
},
"errors": {
"description": "Output only. The first errors or warnings encountered during the running\nof the job. The final message includes the number of errors that caused the\nprocess to stop. Errors here do not necessarily mean that the job has\ncompleted or was unsuccessful.",
"type": "array",
"items": {
"$ref": "ErrorProto"
}
},
"cacheHit": {
"description": "Whether the query result was fetched from the query cache.",
"type": "boolean"
},
"numDmlAffectedRows": {
"description": "Output only. The number of rows affected by a DML statement. Present only\nfor DML statements INSERT, UPDATE or DELETE.",
"type": "string",
"format": "int64"
}
}
},
"QueryRequest": {
"id": "QueryRequest",
"type": "object",
"properties": {
"kind": {
"description": "The resource type of the request.",
"type": "string"
},
"query": {
"description": "Required. A query string, following the BigQuery query syntax, of the\nquery to execute. Example: \"SELECT count(f1) FROM\n[myProjectId:myDatasetId.myTableId]\".",
"type": "string"
},
"maxResults": {
"description": "Optional. The maximum number of rows of data to return per page of\nresults. Setting this flag to a small value such as 1000 and then paging\nthrough results might improve reliability when the query result set is\nlarge. In addition to this limit, responses are also limited to 10 MB. By\ndefault, there is no maximum row count, and only the byte limit applies.",
"type": "integer",
"format": "uint32"
},
"defaultDataset": {
"description": "Optional. Specifies the default datasetId and projectId to assume for any\nunqualified table names in the query. If not set, all table names in the\nquery string must be qualified in the format 'datasetId.tableId'.",
"$ref": "DatasetReference"
},
"timeoutMs": {
"description": "Optional. How long to wait for the query to complete, in milliseconds,\nbefore the request times out and returns. Note that this is only a timeout\nfor the request, not the query. If the query takes longer to run than the\ntimeout value, the call returns without any results and with the\n'jobComplete' flag set to false. You can call GetQueryResults() to wait for\nthe query to complete and read the results. The default value is 10000\nmilliseconds (10 seconds).",
"type": "integer",
"format": "uint32"
},
"dryRun": {
"description": "Optional. If set to true, BigQuery doesn't run the job. Instead, if the\nquery is valid, BigQuery returns statistics about the job such as how many\nbytes would be processed. If the query is invalid, an error returns. The\ndefault value is false.",
"type": "boolean"
},
"preserveNulls": {
"description": "This property is deprecated.",
"type": "boolean"
},
"useQueryCache": {
"description": "Optional. Whether to look for the result in the query cache. The query\ncache is a best-effort cache that will be flushed whenever tables in the\nquery are modified. The default value is true.",
"default": "true",
"type": "boolean"
},
"useLegacySql": {
"description": "Specifies whether to use BigQuery's legacy SQL dialect for this query. The\ndefault value is true. If set to false, the query will use BigQuery's\nstandard SQL: https:\/\/cloud.google.com\/bigquery\/sql-reference\/ When\nuseLegacySql is set to false, the value of flattenResults is ignored; query\nwill be run as if flattenResults is false.",
"default": "true",
"type": "boolean"
},
"parameterMode": {
"description": "Standard SQL only. Set to POSITIONAL to use positional (?) query parameters\nor to NAMED to use named (@myparam) query parameters in this query.",
"type": "string"
},
"queryParameters": {
"description": "Query parameters for Standard SQL queries.",
"type": "array",
"items": {
"$ref": "QueryParameter"
}
},
"location": {
"description": "The geographic location where the job should run. See details at\nhttps:\/\/cloud.google.com\/bigquery\/docs\/locations#specifying_your_location.",
"type": "string"
},
"formatOptions": {
"description": "Optional. Output format adjustments.",
"$ref": "DataFormatOptions"
},
"connectionProperties": {
"description": "Optional. Connection properties which can modify the query behavior.",
"type": "array",
"items": {
"$ref": "ConnectionProperty"
}
},
"labels": {
"description": "Optional. The labels associated with this query.\nLabels can be used to organize and group query jobs.\nLabel keys and values can be no longer than 63 characters, can only contain\nlowercase letters, numeric characters, underscores and dashes.\nInternational characters are allowed. Label keys must start with a letter\nand each label in the list must have a different key.",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"maximumBytesBilled": {
"description": "Optional. Limits the bytes billed for this query. Queries with\nbytes billed above this limit will fail (without incurring a charge).\nIf unspecified, the project default is used.",
"type": "string",
"format": "int64"
}
}
},
"DataFormatOptions": {
"id": "DataFormatOptions",
"description": "Options for data format adjustments.",
"type": "object",
"properties": {
"useInt64Timestamp": {
"description": "Optional. Output timestamp as usec int64. Default is false.",
"type": "boolean"
}
}
},
"QueryResponse": {
"id": "QueryResponse",
"type": "object",
"properties": {
"kind": {
"description": "The resource type.",
"type": "string"
},
"schema": {
"description": "The schema of the results. Present only when the query completes\nsuccessfully.",
"$ref": "TableSchema"
},
"jobReference": {
"description": "Reference to the Job that was created to run the query. This field will be\npresent even if the original request timed out, in which case\nGetQueryResults can be used to read the results once the query has\ncompleted. Since this API only returns the first page of results,\nsubsequent pages can be fetched via the same mechanism (GetQueryResults).",
"$ref": "JobReference"
},
"totalRows": {
"description": "The total number of rows in the complete query result set, which can be\nmore than the number of rows in this single page of results.",
"type": "string",
"format": "uint64"
},
"pageToken": {
"description": "A token used for paging results.",
"type": "string"
},
"rows": {
"description": "An object with as many results as can be contained within the maximum\npermitted reply size. To get any additional rows, you can call\nGetQueryResults and specify the jobReference returned above.",
"type": "array",
"items": {
"$ref": "TableRow"
}
},
"totalBytesProcessed": {
"description": "The total number of bytes processed for this query. If this query was a dry\nrun, this is the number of bytes that would be processed if the query were\nrun.",
"type": "string",
"format": "int64"
},
"jobComplete": {
"description": "Whether the query has completed or not. If rows or totalRows are present,\nthis will always be true. If this is false, totalRows will not be\navailable.",
"type": "boolean"
},
"errors": {
"description": "Output only. The first errors or warnings encountered during the running of\nthe job. The final message includes the number of errors that caused the\nprocess to stop. Errors here do not necessarily mean that the job has\ncompleted or was unsuccessful.",
"type": "array",
"items": {
"$ref": "ErrorProto"
}
},
"cacheHit": {
"description": "Whether the query result was fetched from the query cache.",
"type": "boolean"
},
"numDmlAffectedRows": {
"description": "Output only. The number of rows affected by a DML statement. Present only\nfor DML statements INSERT, UPDATE or DELETE.",
"type": "string",
"format": "int64"
}
}
},
"ProjectList": {
"id": "ProjectList",
"description": "Response object of ListProjects",
"type": "object",
"properties": {
"kind": {
"description": "The resource type of the response.",
"type": "string"
},
"etag": {
"description": "A hash of the page of results.",
"type": "string"
},
"nextPageToken": {
"description": "A token to request the next page of results.",
"type": "string"
},
"projects": {
"description": "Projects to which the user has at least READ access.",
"type": "array",
"items": {
"description": "Information about a single project.",
"type": "object",
"properties": {
"kind": {
"description": "The resource type.",
"type": "string"
},
"id": {
"description": "An opaque ID of this project.",
"type": "string"
},
"numericId": {
"description": "The numeric ID of this project.",
"type": "string",
"format": "int64"
},
"projectReference": {
"description": "A unique reference to this project.",
"$ref": "ProjectReference"
},
"friendlyName": {
"description": "A descriptive name for this project.\nA wrapper is used here because friendlyName can be set to the empty string.",
"type": "string"
}
}
}
},
"totalItems": {
"description": "The total number of projects in the list.\nA wrapper is used here because the field should still be in the response\nwhen the value is 0.",
"type": "integer",
"format": "int32"
}
}
},
"ProjectReference": {
"id": "ProjectReference",
"description": "A unique reference to a project.",
"type": "object",
"properties": {
"projectId": {
"description": "Required. ID of the project.\nCan be either the numeric ID or the assigned ID of the project.",
"type": "string"
}
}
},
"GetServiceAccountResponse": {
"id": "GetServiceAccountResponse",
"description": "Response object of GetServiceAccount",
"type": "object",
"properties": {
"kind": {
"description": "The resource type of the response.",
"type": "string"
},
"email": {
"description": "The service account email address.",
"type": "string"
}
}
},
"Routine": {
"id": "Routine",
"description": "A user-defined function or a stored procedure.",
"type": "object",
"properties": {
"etag": {
"description": "Output only. A hash of this resource.",
"type": "string"
},
"routineReference": {
"description": "Required. Reference describing the ID of this routine.",
"$ref": "RoutineReference"
},
"routineType": {
"description": "Required. The type of routine.",
"enumDescriptions": [
"",
"Non-builtin permanent scalar function.",
"Stored procedure."
],
"type": "string",
"enum": [
"ROUTINE_TYPE_UNSPECIFIED",
"SCALAR_FUNCTION",
"PROCEDURE"
]
},
"creationTime": {
"description": "Output only. The time when this routine was created, in milliseconds since\nthe epoch.",
"type": "string",
"format": "int64"
},
"lastModifiedTime": {
"description": "Output only. The time when this routine was last modified, in milliseconds\nsince the epoch.",
"type": "string",
"format": "int64"
},
"language": {
"description": "Optional. Defaults to \"SQL\".",
"enumDescriptions": [
"",
"SQL language.",
"JavaScript language."
],
"type": "string",
"enum": [
"LANGUAGE_UNSPECIFIED",
"SQL",
"JAVASCRIPT"
]
},
"arguments": {
"description": "Optional.",
"type": "array",
"items": {
"$ref": "Argument"
}
},
"returnType": {
"description": "Optional if language = \"SQL\"; required otherwise.\n\nIf absent, the return type is inferred from definition_body at query time\nin each query that references this routine. If present, then the evaluated\nresult will be cast to the specified returned type at query time.\n\nFor example, for the functions created with the following statements:\n\n* `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);`\n\n* `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));`\n\n* `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));`\n\nThe return_type is `{type_kind: \"FLOAT64\"}` for `Add` and `Decrement`, and\nis absent for `Increment` (inferred as FLOAT64 at query time).\n\nSuppose the function `Add` is replaced by\n `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);`\n\nThen the inferred return type of `Increment` is automatically changed to\nINT64 at query time, while the return type of `Decrement` remains FLOAT64.",
"$ref": "StandardSqlDataType"
},
"importedLibraries": {
"description": "Optional. If language = \"JAVASCRIPT\", this field stores the path of the\nimported JAVASCRIPT libraries.",
"type": "array",
"items": {
"type": "string"
}
},
"definitionBody": {
"description": "Required. The body of the routine.\n\nFor functions, this is the expression in the AS clause.\n\nIf language=SQL, it is the substring inside (but excluding) the\nparentheses. For example, for the function created with the following\nstatement:\n\n`CREATE FUNCTION JoinLines(x string, y string) as (concat(x, \"\\n\", y))`\n\nThe definition_body is `concat(x, \"\\n\", y)` (\\n is not replaced with\nlinebreak).\n\nIf language=JAVASCRIPT, it is the evaluated string in the AS clause.\nFor example, for the function created with the following statement:\n\n`CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return \"\\n\";\\n'`\n\nThe definition_body is\n\n`return \"\\n\";\\n`\n\nNote that both \\n are replaced with linebreaks.",
"type": "string"
},
"description": {
"description": "Optional. [Experimental] The description of the routine if defined.",
"type": "string"
}
}
},
"Argument": {
"id": "Argument",
"description": "Input\/output argument of a function or a stored procedure.",
"type": "object",
"properties": {
"name": {
"description": "Optional. The name of this argument. Can be absent for function return argument.",
"type": "string"
},
"argumentKind": {
"description": "Optional. Defaults to FIXED_TYPE.",
"enumDescriptions": [
"",
"The argument is a variable with fully specified type, which can be a\nstruct or an array, but not a table.",
"The argument is any type, including struct or array, but not a table.\nTo be added: FIXED_TABLE, ANY_TABLE"
],
"type": "string",
"enum": [
"ARGUMENT_KIND_UNSPECIFIED",
"FIXED_TYPE",
"ANY_TYPE"
]
},
"mode": {
"description": "Optional. Specifies whether the argument is input or output.\nCan be set for procedures only.",
"enumDescriptions": [
"",
"The argument is input-only.",
"The argument is output-only.",
"The argument is both an input and an output."
],
"type": "string",
"enum": [
"MODE_UNSPECIFIED",
"IN",
"OUT",
"INOUT"
]
},
"dataType": {
"description": "Required unless argument_kind = ANY_TYPE.",
"$ref": "StandardSqlDataType"
}
}
},
"ListRoutinesResponse": {
"id": "ListRoutinesResponse",
"type": "object",
"properties": {
"routines": {
"description": "Routines in the requested dataset. Unless read_mask is set in the request,\nonly the following fields are populated:\netag, project_id, dataset_id, routine_id, routine_type, creation_time,\nlast_modified_time, and language.",
"type": "array",
"items": {
"$ref": "Routine"
}
},
"nextPageToken": {
"description": "A token to request the next page of results.",
"type": "string"
}
}
},
"Table": {
"id": "Table",
"type": "object",
"properties": {
"kind": {
"description": "The type of resource ID.",
"type": "string"
},
"etag": {
"description": "Output only. A hash of this resource.",
"type": "string"
},
"id": {
"description": "Output only. An opaque ID uniquely identifying the table.",
"type": "string"
},
"selfLink": {
"description": "Output only. A URL that can be used to access this resource again.",
"type": "string"
},
"tableReference": {
"description": "Required. Reference describing the ID of this table.",
"$ref": "TableReference"
},
"friendlyName": {
"description": "Optional. A descriptive name for this table.",
"type": "string"
},
"description": {
"description": "Optional. A user-friendly description of this table.",
"type": "string"
},
"labels": {
"description": "The labels associated with this table. You can use these to organize and\ngroup your tables. Label keys and values can be no longer than 63\ncharacters, can only contain lowercase letters, numeric characters,\nunderscores and dashes. International characters are allowed. Label values\nare optional. Label keys must start with a letter and each label in the\nlist must have a different key.",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"schema": {
"description": "[Optional] Describes the schema of this table.",
"$ref": "TableSchema"
},
"timePartitioning": {
"description": "If specified, configures time-based partitioning for this table.",
"$ref": "TimePartitioning"
},
"rangePartitioning": {
"description": "If specified, configures range partitioning for this table.",
"$ref": "RangePartitioning"
},
"clustering": {
"description": "Clustering specification for the table. Must be specified with time-based\npartitioning, data in the table will be first partitioned and subsequently\nclustered.",
"$ref": "Clustering"
},
"requirePartitionFilter": {
"description": "[Optional] If set to true, queries over this table require\na partition filter that can be used for partition elimination to be\nspecified.",
"default": "false",
"type": "boolean"
},
"numBytes": {
"description": "Output only. The size of this table in bytes, excluding any data in the\nstreaming buffer.",
"type": "string",
"format": "int64"
},
"numPhysicalBytes": {
"description": "Output only. Experimental. The physical size of this table in bytes,\nexcluding any data in the streaming buffer. This includes compression and\nstorage used for time travel.",
"type": "string",
"format": "int64"
},
"numLongTermBytes": {
"description": "Output only. The number of bytes in the table that are considered\n\"long-term storage\".",
"type": "string",
"format": "int64"
},
"numRows": {
"description": "Output only. The number of rows of data in this table, excluding any data\nin the streaming buffer.",
"type": "string",
"format": "uint64"
},
"creationTime": {
"description": "Output only. The time when this table was created, in milliseconds since\nthe epoch.",
"type": "string",
"format": "int64"
},
"expirationTime": {
"description": "Optional. The time when this table expires, in milliseconds since the\nepoch. If not present, the table will persist indefinitely. Expired tables\nwill be deleted and their storage reclaimed. The defaultTableExpirationMs\nproperty of the encapsulating dataset can be used to set a default\nexpirationTime on newly created tables.",
"type": "string",
"format": "int64"
},
"lastModifiedTime": {
"description": "Output only. The time when this table was last modified, in milliseconds\nsince the epoch.",
"type": "string",
"format": "uint64"
},
"type": {
"description": "Output only. Describes the table type. The following values are supported:\nTABLE: A normal BigQuery table.\nVIEW: A virtual table defined by a SQL query.\nEXTERNAL: A table that references data stored in an external storage\nsystem, such as Google Cloud Storage.\nThe default value is TABLE.",
"type": "string"
},
"view": {
"description": "Optional. The view definition.",
"$ref": "ViewDefinition"
},
"materializedView": {
"description": "Optional. [Experimental] The materialized view definition.",
"$ref": "MaterializedViewDefinition"
},
"externalDataConfiguration": {
"description": "Optional. Describes the data format, location, and other properties of\na table stored outside of BigQuery. By defining these properties, the data\nsource can then be queried as if it were a standard BigQuery table.",
"$ref": "ExternalDataConfiguration"
},
"location": {
"description": "Output only. The geographic location where the table resides. This value\nis inherited from the dataset.",
"type": "string"
},
"streamingBuffer": {
"description": "Output only. Contains information regarding this table's streaming buffer,\nif one is present. This field will be absent if the table is not being\nstreamed to or if there is no data in the streaming buffer.",
"$ref": "Streamingbuffer"
},
"encryptionConfiguration": {
"description": "Custom encryption configuration (e.g., Cloud KMS keys).",
"$ref": "EncryptionConfiguration"
}
}
},
"ViewDefinition": {
"id": "ViewDefinition",
"type": "object",
"properties": {
"query": {
"description": "Required. A query that BigQuery executes when the view is referenced.",
"type": "string"
},
"userDefinedFunctionResources": {
"description": "Describes user-defined function resources used in the query.",
"type": "array",
"items": {
"$ref": "UserDefinedFunctionResource"
}
},
"useLegacySql": {
"description": "Specifies whether to use BigQuery's legacy SQL for this view.\nThe default value is true. If set to false, the view will use\nBigQuery's standard SQL:\nhttps:\/\/cloud.google.com\/bigquery\/sql-reference\/\n\nQueries and views that reference this view must use the same flag value.\nA wrapper is used here because the default value is True.",
"type": "boolean"
}
}
},
"MaterializedViewDefinition": {
"id": "MaterializedViewDefinition",
"type": "object",
"properties": {
"query": {
"description": "Required. A query whose results are persisted.",
"type": "string"
},
"lastRefreshTime": {
"description": "Output only. The time when this materialized view was last refreshed, in\nmilliseconds since the epoch.",
"type": "string",
"format": "int64"
},
"enableRefresh": {
"description": "Optional. Enable automatic refresh of the materialized view when the base table is\nupdated. The default value is \"true\".",
"type": "boolean"
},
"refreshIntervalMs": {
"description": "Optional. The maximum frequency at which this materialized view will be refreshed.\nThe default value is \"1800000\" (30 minutes).",
"type": "string",
"format": "uint64"
}
}
},
"Streamingbuffer": {
"id": "Streamingbuffer",
"type": "object",
"properties": {
"estimatedBytes": {
"description": "Output only. A lower-bound estimate of the number of bytes currently in\nthe streaming buffer.",
"type": "string",
"format": "uint64"
},
"estimatedRows": {
"description": "Output only. A lower-bound estimate of the number of rows currently in the\nstreaming buffer.",
"type": "string",
"format": "uint64"
},
"oldestEntryTime": {
"description": "Output only. Contains the timestamp of the oldest entry in the streaming\nbuffer, in milliseconds since the epoch, if the streaming buffer is\navailable.",
"type": "string",
"format": "uint64"
}
}
},
"TableList": {
"id": "TableList",
"type": "object",
"properties": {
"kind": {
"description": "The type of list.",
"type": "string"
},
"etag": {
"description": "A hash of this page of results.",
"type": "string"
},
"nextPageToken": {
"description": "A token to request the next page of results.",
"type": "string"
},
"tables": {
"description": "Tables in the requested dataset.",
"type": "array",
"items": {
"type": "object",
"properties": {
"kind": {
"description": "The resource type.",
"type": "string"
},
"id": {
"description": "An opaque ID of the table.",
"type": "string"
},
"tableReference": {
"description": "A reference uniquely identifying table.",
"$ref": "TableReference"
},
"friendlyName": {
"description": "The user-friendly name for this table.",
"type": "string"
},
"type": {
"description": "The type of table.",
"type": "string"
},
"timePartitioning": {
"description": "The time-based partitioning for this table.",
"$ref": "TimePartitioning"
},
"rangePartitioning": {
"description": "The range partitioning for this table.",
"$ref": "RangePartitioning"
},
"clustering": {
"description": "[TrustedTester] Clustering specification for this table, if configured.",
"$ref": "Clustering"
},
"hivePartitioningOptions": {
"description": "[Experimental] The hive partitioning configuration for this table,\nwhen applicable.",
"$ref": "HivePartitioningOptions"
},
"labels": {
"description": "The labels associated with this table. You can use these to organize\nand group your tables.",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"view": {
"description": "Additional details for a view.",
"type": "object",
"properties": {
"useLegacySql": {
"description": "True if view is defined in legacy SQL dialect,\nfalse if in standard SQL.",
"type": "boolean"
}
}
},
"creationTime": {
"description": "Output only. The time when this table was created, in milliseconds since\nthe epoch.",
"type": "string",
"format": "int64"
},
"expirationTime": {
"description": "The time when this table expires, in milliseconds since the\nepoch. If not present, the table will persist indefinitely. Expired tables\nwill be deleted and their storage reclaimed.",
"type": "string",
"format": "int64"
}
}
}
},
"totalItems": {
"description": "The total number of tables in the dataset.",
"type": "integer",
"format": "int32"
}
}
},
"TableDataInsertAllRequest": {
"id": "TableDataInsertAllRequest",
"type": "object",
"properties": {
"kind": {
"description": "The resource type of the response.",
"type": "string"
},
"skipInvalidRows": {
"description": "[Optional] Insert all valid rows of a request, even if invalid rows exist.\nThe default value is false, which causes the entire request to fail if any\ninvalid rows exist.",
"type": "boolean"
},
"ignoreUnknownValues": {
"description": "[Optional] Accept rows that contain values that do not match the schema.\nThe unknown values are ignored.\nDefault is false, which treats unknown values as errors.",
"type": "boolean"
},
"templateSuffix": {
"description": "If specified, treats the destination table as a base\n template, and inserts the rows into an instance table named\n\"{destination}{templateSuffix}\". BigQuery will manage creation of the\ninstance table, using the schema of the base template table.\n\nSee\nhttps:\/\/cloud.google.com\/bigquery\/streaming-data-into-bigquery#template-tables\nfor considerations when working with templates tables.",
"type": "string"
},
"rows": {
"type": "array",
"items": {
"type": "object",
"properties": {
"insertId": {
"type": "string"
},
"json": {
"$ref": "JsonObject"
}
}
}
}
}
},
"TableDataInsertAllResponse": {
"id": "TableDataInsertAllResponse",
"type": "object",
"properties": {
"kind": {
"type": "string"
},
"insertErrors": {
"type": "array",
"items": {
"type": "object",
"properties": {
"index": {
"type": "integer",
"format": "uint32"
},
"errors": {
"type": "array",
"items": {
"$ref": "ErrorProto"
}
}
}
}
}
}
},
"TableDataList": {
"id": "TableDataList",
"type": "object",
"properties": {
"kind": {
"description": "Will be set to \"bigquery#tableDataList\".",
"type": "string"
},
"etag": {
"description": "Etag to the response.",
"type": "string"
},
"totalRows": {
"description": "Total rows of the entire table. In order to show default value \"0\",\nwe have to present it as string.",
"type": "string"
},
"pageToken": {
"description": "A token indicates from where we should start the next read.",
"type": "string"
},
"rows": {
"description": "Repeated rows as result. The REST-based representation of this data\nleverages a series of JSON f,v objects for indicating fields and values.",
"type": "array",
"items": {
"$ref": "TableRow"
}
}
}
},
"SetIamPolicyRequest": {
"id": "SetIamPolicyRequest",
"description": "Request message for `SetIamPolicy` method.",
"type": "object",
"properties": {
"policy": {
"description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.",
"$ref": "Policy"
},
"updateMask": {
"description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\n\n`paths: \"bindings, etag\"`",
"type": "string",
"format": "google-fieldmask"
}
}
},
"Policy": {
"id": "Policy",
"description": "An Identity and Access Management (IAM) policy, which specifies access\ncontrols for Google Cloud resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions; each `role` can be an IAM predefined role or a user-created\ncustom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a\n`condition`, which is a logical expression that allows access to a resource\nonly if the expression evaluates to `true`. A condition can add constraints\nbased on attributes of the request, the resource, or both. To learn which\nresources support conditions in their IAM policies, see the\n[IAM documentation](https:\/\/cloud.google.com\/iam\/help\/conditions\/resource-policies).\n\n**JSON example:**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles\/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles\/resourcemanager.organizationViewer\",\n \"members\": [\n \"user:eve@example.com\"\n ],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ],\n \"etag\": \"BwWWja0YfJA=\",\n \"version\": 3\n }\n\n**YAML example:**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles\/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles\/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time < timestamp('2020-10-01T00:00:00.000Z')\n - etag: BwWWja0YfJA=\n - version: 3\n\nFor a description of IAM and its features, see the\n[IAM documentation](https:\/\/cloud.google.com\/iam\/docs\/).",
"type": "object",
"properties": {
"version": {
"description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. Requests that specify an invalid value\nare rejected.\n\nAny operation that affects conditional role bindings must specify version\n`3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding\n* Adding a conditional role binding to a policy\n* Changing a conditional role binding in a policy\n* Removing any role binding, with or without a condition, from a policy\n that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may\nspecify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM documentation](https:\/\/cloud.google.com\/iam\/help\/conditions\/resource-policies).",
"type": "integer",
"format": "int32"
},
"bindings": {
"description": "Associates a list of `members` to a `role`. Optionally, may specify a\n`condition` that determines how and when the `bindings` are applied. Each\nof the `bindings` must contain at least one member.",
"type": "array",
"items": {
"$ref": "Binding"
}
},
"auditConfigs": {
"description": "Specifies cloud audit logging configuration for this policy.",
"type": "array",
"items": {
"$ref": "AuditConfig"
}
},
"etag": {
"description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field\nwhenever you call `setIamPolicy`. If you omit this field, then IAM allows\nyou to overwrite a version `3` policy with a version `1` policy, and all of\nthe conditions in the version `3` policy are lost.",
"type": "string",
"format": "byte"
}
}
},
"Binding": {
"id": "Binding",
"description": "Associates `members` with a `role`.",
"type": "object",
"properties": {
"role": {
"description": "Role that is assigned to `members`.\nFor example, `roles\/viewer`, `roles\/editor`, or `roles\/owner`.",
"type": "string"
},
"members": {
"description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example, `alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n",
"type": "array",
"items": {
"type": "string"
}
},
"condition": {
"description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the\ncurrent request.\n\nIf the condition evaluates to `false`, then this binding does not apply to\nthe current request. However, a different role binding might grant the same\nrole to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https:\/\/cloud.google.com\/iam\/help\/conditions\/resource-policies).",
"$ref": "Expr"
}
}
},
"Expr": {
"id": "Expr",
"description": "Represents a textual expression in the Common Expression Language (CEL)\nsyntax. CEL is a C-like expression language. The syntax and semantics of CEL\nare documented at https:\/\/github.com\/google\/cel-spec.\n\nExample (Comparison):\n\n title: \"Summary size limit\"\n description: \"Determines if a summary is less than 100 chars\"\n expression: \"document.summary.size() < 100\"\n\nExample (Equality):\n\n title: \"Requestor is owner\"\n description: \"Determines if requestor is the document owner\"\n expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\n title: \"Public documents\"\n description: \"Determine whether the document should be publicly visible\"\n expression: \"document.type != 'private' && document.type != 'internal'\"\n\nExample (Data Manipulation):\n\n title: \"Notification string\"\n description: \"Create a notification string with a timestamp.\"\n expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression\nare determined by the service that evaluates it. See the service\ndocumentation for additional information.",
"type": "object",
"properties": {
"expression": {
"description": "Textual representation of an expression in Common Expression Language\nsyntax.",
"type": "string"
},
"title": {
"description": "Optional. Title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.",
"type": "string"
},
"description": {
"description": "Optional. Description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.",
"type": "string"
},
"location": {
"description": "Optional. String indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.",
"type": "string"
}
}
},
"AuditConfig": {
"id": "AuditConfig",
"description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.",
"type": "object",
"properties": {
"service": {
"description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.",
"type": "string"
},
"auditLogConfigs": {
"description": "The configuration for logging of each type of permission.",
"type": "array",
"items": {
"$ref": "AuditLogConfig"
}
}
}
},
"AuditLogConfig": {
"id": "AuditLogConfig",
"description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.",
"type": "object",
"properties": {
"logType": {
"description": "The log type that this config enables.",
"enumDescriptions": [
"Default case. Should never be this.",
"Admin reads. Example: CloudIAM getIamPolicy",
"Data writes. Example: CloudSQL Users create",
"Data reads. Example: CloudSQL Users list"
],
"type": "string",
"enum": [
"LOG_TYPE_UNSPECIFIED",
"ADMIN_READ",
"DATA_WRITE",
"DATA_READ"
]
},
"exemptedMembers": {
"description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"GetIamPolicyRequest": {
"id": "GetIamPolicyRequest",
"description": "Request message for `GetIamPolicy` method.",
"type": "object",
"properties": {
"options": {
"description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`.",
"$ref": "GetPolicyOptions"
}
}
},
"GetPolicyOptions": {
"id": "GetPolicyOptions",
"description": "Encapsulates settings provided to GetIamPolicy.",
"type": "object",
"properties": {
"requestedPolicyVersion": {
"description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the\n[IAM\ndocumentation](https:\/\/cloud.google.com\/iam\/help\/conditions\/resource-policies).",
"type": "integer",
"format": "int32"
}
}
},
"TestIamPermissionsRequest": {
"id": "TestIamPermissionsRequest",
"description": "Request message for `TestIamPermissions` method.",
"type": "object",
"properties": {
"permissions": {
"description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https:\/\/cloud.google.com\/iam\/docs\/overview#permissions).",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"TestIamPermissionsResponse": {
"id": "TestIamPermissionsResponse",
"description": "Response message for `TestIamPermissions` method.",
"type": "object",
"properties": {
"permissions": {
"description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"LocationMetadata": {
"id": "LocationMetadata",
"description": "BigQuery-specific metadata about a location. This will be set on\ngoogle.cloud.location.Location.metadata in Cloud Location API\nresponses.",
"type": "object",
"properties": {
"legacyLocationId": {
"description": "The legacy BigQuery location ID, e.g. \u201CEU\u201D for the \u201Ceurope\u201D location.\nThis is for any API consumers that need the legacy \u201CUS\u201D and \u201CEU\u201D locations.",
"type": "string"
}
}
},
"TableRow": {
"id": "TableRow",
"type": "object",
"properties": {
"f": {
"description": "Represents a single row in the result set, consisting of one or more fields.",
"type": "array",
"items": {
"$ref": "TableCell"
}
}
}
},
"TableCell": {
"id": "TableCell",
"type": "object",
"properties": {
"v": {
"type": "any"
}
}
},
"JsonObject": {
"id": "JsonObject",
"description": "Represents a single JSON object.",
"type": "object",
"additionalProperties": {
"$ref": "JsonValue"
}
},
"JsonValue": {
"id": "JsonValue",
"type": "any"
}
},
"resources": {
"datasets": {
"methods": {
"get": {
"id": "bigquery.datasets.get",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the requested dataset",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the requested dataset",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"response": {
"$ref": "Dataset"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Returns the dataset specified by datasetID."
},
"insert": {
"id": "bigquery.datasets.insert",
"path": "bigquery/v2/projects/{+projectId}/datasets",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets",
"httpMethod": "POST",
"parameters": {
"projectId": {
"description": "Required. Project ID of the new dataset",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId"
],
"request": {
"$ref": "Dataset"
},
"response": {
"$ref": "Dataset"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Creates a new empty dataset."
},
"patch": {
"id": "bigquery.datasets.patch",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}",
"httpMethod": "PATCH",
"parameters": {
"projectId": {
"description": "Required. Project ID of the dataset being updated",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the dataset being updated",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"request": {
"$ref": "Dataset"
},
"response": {
"$ref": "Dataset"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Updates information in an existing dataset. The update method replaces the\nentire dataset resource, whereas the patch method only replaces fields that\nare provided in the submitted dataset resource.\nThis method supports RFC5789 patch semantics."
},
"update": {
"id": "bigquery.datasets.update",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}",
"httpMethod": "PUT",
"parameters": {
"projectId": {
"description": "Required. Project ID of the dataset being updated",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the dataset being updated",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"request": {
"$ref": "Dataset"
},
"response": {
"$ref": "Dataset"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Updates information in an existing dataset. The update method replaces the\nentire dataset resource, whereas the patch method only replaces fields that\nare provided in the submitted dataset resource."
},
"delete": {
"id": "bigquery.datasets.delete",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}",
"httpMethod": "DELETE",
"parameters": {
"projectId": {
"description": "Required. Project ID of the dataset being deleted",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of dataset being deleted",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"deleteContents": {
"description": "If True, delete all the tables in the dataset.\nIf False and the dataset contains tables, the request will fail.\nDefault is False",
"location": "query",
"type": "boolean"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Deletes the dataset specified by the datasetId value. Before you can delete\na dataset, you must delete all its tables, either manually or by specifying\ndeleteContents. Immediately after deletion, you can create another dataset\nwith the same name."
},
"list": {
"id": "bigquery.datasets.list",
"path": "bigquery/v2/projects/{+projectId}/datasets",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the datasets to be listed",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"maxResults": {
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"location": "query",
"type": "integer",
"format": "uint32"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of\nresults",
"location": "query",
"type": "string"
},
"all": {
"description": "Whether to list all datasets, including hidden ones",
"location": "query",
"type": "boolean"
},
"filter": {
"description": "An expression for filtering the results of the request by label.\nThe syntax is \\\"labels.<name>[:<value>]\\\".\nMultiple filters can be ANDed together by connecting with a space.\nExample: \\\"labels.department:receiving labels.active\\\".\nSee [Filtering datasets using\nlabels](\/bigquery\/docs\/labeling-datasets#filtering_datasets_using_labels)\nfor details.",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
"projectId"
],
"response": {
"$ref": "DatasetList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Lists all datasets in the specified project to which the user has been\ngranted the READER dataset role."
},
"setIamPolicy": {
"id": "bigquery.datasets.setIamPolicy",
"path": "bigquery/v2/{+resource}:setIamPolicy",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}:setIamPolicy",
"httpMethod": "POST",
"parameters": {
"resource": {
"description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.",
"location": "path",
"required": true,
"pattern": "^projects\/[^\/]+\/datasets\/[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"resource"
],
"request": {
"$ref": "SetIamPolicyRequest"
},
"response": {
"$ref": "Policy"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors."
},
"getIamPolicy": {
"id": "bigquery.datasets.getIamPolicy",
"path": "bigquery/v2/{+resource}:getIamPolicy",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}:getIamPolicy",
"httpMethod": "POST",
"parameters": {
"resource": {
"description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.",
"location": "path",
"required": true,
"pattern": "^projects\/[^\/]+\/datasets\/[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"resource"
],
"request": {
"$ref": "GetIamPolicyRequest"
},
"response": {
"$ref": "Policy"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset."
},
"testIamPermissions": {
"id": "bigquery.datasets.testIamPermissions",
"path": "bigquery/v2/{+resource}:testIamPermissions",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}:testIamPermissions",
"httpMethod": "POST",
"parameters": {
"resource": {
"description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.",
"location": "path",
"required": true,
"pattern": "^projects\/[^\/]+\/datasets\/[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"resource"
],
"request": {
"$ref": "TestIamPermissionsRequest"
},
"response": {
"$ref": "TestIamPermissionsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning."
}
}
},
"models": {
"methods": {
"get": {
"id": "bigquery.models.get",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/models/{modelsId}",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the requested model.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the requested model.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"modelId": {
"description": "Required. Model ID of the requested model.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"modelId"
],
"response": {
"$ref": "Model"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Gets the specified model resource by model ID."
},
"list": {
"id": "bigquery.models.list",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/models",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/models",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the models to list.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the models to list.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"maxResults": {
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"location": "query",
"type": "integer",
"format": "uint32"
},
"pageToken": {
"description": "Page token, returned by a previous call to request the next page of\nresults",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"response": {
"$ref": "ListModelsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Lists all models in the specified dataset. Requires the READER dataset\nrole."
},
"patch": {
"id": "bigquery.models.patch",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/models/{modelsId}",
"httpMethod": "PATCH",
"parameters": {
"projectId": {
"description": "Required. Project ID of the model to patch.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the model to patch.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"modelId": {
"description": "Required. Model ID of the model to patch.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"modelId"
],
"request": {
"$ref": "Model"
},
"response": {
"$ref": "Model"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Patch specific fields in the specified model."
},
"delete": {
"id": "bigquery.models.delete",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/models/{modelsId}",
"httpMethod": "DELETE",
"parameters": {
"projectId": {
"description": "Required. Project ID of the model to delete.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the model to delete.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"modelId": {
"description": "Required. Model ID of the model to delete.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"modelId"
],
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Deletes the model specified by modelId from the dataset."
}
}
},
"jobs": {
"methods": {
"cancel": {
"id": "bigquery.jobs.cancel",
"path": "bigquery/v2/projects/{+projectId}/jobs/{+jobId}/cancel",
"flatPath": "bigquery/v2/projects/{projectsId}/jobs/{jobsId}/cancel",
"httpMethod": "POST",
"parameters": {
"projectId": {
"description": "Required. Project ID of the job to cancel",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"jobId": {
"description": "Required. Job ID of the job to cancel",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"location": {
"description": "The geographic location of the job. Required except for US and EU. See\ndetails at\nhttps:\/\/cloud.google.com\/bigquery\/docs\/locations#specifying_your_location.",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"jobId"
],
"response": {
"$ref": "JobCancelResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Requests that a job be cancelled. This call will return immediately, and\nthe client will need to poll for the job status to see if the cancel\ncompleted successfully. Cancelled jobs may still incur costs."
},
"get": {
"id": "bigquery.jobs.get",
"path": "bigquery/v2/projects/{+projectId}/jobs/{+jobId}",
"flatPath": "bigquery/v2/projects/{projectsId}/jobs/{jobsId}",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the requested job.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"jobId": {
"description": "Required. Job ID of the requested job.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"location": {
"description": "The geographic location of the job. Required except for US and EU. See\ndetails at\nhttps:\/\/cloud.google.com\/bigquery\/docs\/locations#specifying_your_location.",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"jobId"
],
"response": {
"$ref": "Job"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Returns information about a specific job. Job information is available for\na six month period after creation. Requires that you're the person who ran\nthe job, or have the Is Owner project role."
},
"insert": {
"id": "bigquery.jobs.insert",
"path": "bigquery/v2/projects/{+projectId}/jobs",
"flatPath": "bigquery/v2/projects/{projectsId}/jobs",
"httpMethod": "POST",
"parameters": {
"projectId": {
"description": "Project ID of project that will be billed for the job.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId"
],
"supportsMediaUpload": true,
"mediaUpload": {
"accept": [
"*/*"
],
"protocols": {
"simple": {
"multipart": true,
"path": "/upload/bigquery/v2/projects/{+projectId}/jobs"
}
}
},
"request": {
"$ref": "Job"
},
"response": {
"$ref": "Job"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"
],
"description": "Starts a new asynchronous job.\n\nThis API has two different kinds of endpoint URIs, as this method supports\na variety of use cases.\n\n* The *Metadata* URI is used for most interactions, as it accepts the job\n configuration directly.\n* The *Upload* URI is ONLY for the case when you're sending both a load job\n configuration and a data stream together. In this case, the Upload URI\n accepts the job configuration and the data as two distinct multipart MIME\n parts."
},
"list": {
"id": "bigquery.jobs.list",
"path": "bigquery/v2/projects/{+projectId}/jobs",
"flatPath": "bigquery/v2/projects/{projectsId}/jobs",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Project ID of the jobs to list.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"allUsers": {
"description": "Whether to display jobs owned by all users in the project. Default False.",
"location": "query",
"type": "boolean"
},
"maxResults": {
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"location": "query",
"type": "integer",
"format": "int32"
},
"minCreationTime": {
"description": "Min value for job creation time, in milliseconds since the POSIX epoch.\nIf set, only jobs created after or at this timestamp are returned.",
"location": "query",
"type": "string",
"format": "uint64"
},
"maxCreationTime": {
"description": "Max value for job creation time, in milliseconds since the POSIX epoch.\nIf set, only jobs created before or at this timestamp are returned.",
"location": "query",
"type": "string",
"format": "uint64"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of\nresults.",
"location": "query",
"type": "string"
},
"projection": {
"description": "Restrict information returned to a set of selected fields",
"location": "query",
"type": "string",
"enum": [
"MINIMAL",
"FULL"
]
},
"stateFilter": {
"description": "Filter for job state",
"location": "query",
"repeated": true,
"type": "string",
"enum": [
"DONE",
"PENDING",
"RUNNING"
]
},
"parentJobId": {
"description": "If set, show only child jobs of the specified parent. Otherwise, show all\ntop-level jobs.",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
"projectId"
],
"response": {
"$ref": "JobList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Lists all jobs that you started in the specified project. Job information\nis available for a six month period after creation. The job list is sorted\nin reverse chronological order, by job creation time. Requires the Can View\nproject role, or the Is Owner project role if you set the allUsers\nproperty."
},
"getQueryResults": {
"id": "bigquery.jobs.getQueryResults",
"path": "bigquery/v2/projects/{+projectId}/queries/{+jobId}",
"flatPath": "bigquery/v2/projects/{projectsId}/queries/{queriesId}",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the query job.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"jobId": {
"description": "Required. Job ID of the query job.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"startIndex": {
"description": "Zero-based index of the starting row.",
"location": "query",
"type": "string",
"format": "uint64"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of\nresults.",
"location": "query",
"type": "string"
},
"maxResults": {
"description": "Maximum number of results to read.",
"location": "query",
"type": "integer",
"format": "uint32"
},
"timeoutMs": {
"description": "How long to wait for the query to complete, in milliseconds, before\nreturning. Default is 10 seconds. If the timeout passes before the job\ncompletes, the 'jobComplete' field in the response will be false.",
"location": "query",
"type": "integer",
"format": "uint32"
},
"location": {
"description": "The geographic location of the job. Required except for US and EU. See\ndetails at\nhttps:\/\/cloud.google.com\/bigquery\/docs\/locations#specifying_your_location.",
"location": "query",
"type": "string"
},
"formatOptions.useInt64Timestamp": {
"description": "Optional. Output timestamp as usec int64. Default is false.",
"location": "query",
"type": "boolean"
}
},
"parameterOrder": [
"projectId",
"jobId"
],
"response": {
"$ref": "GetQueryResultsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "RPC to get the results of a query job."
},
"query": {
"id": "bigquery.jobs.query",
"path": "bigquery/v2/projects/{+projectId}/queries",
"flatPath": "bigquery/v2/projects/{projectsId}/queries",
"httpMethod": "POST",
"parameters": {
"projectId": {
"description": "Required. Project ID of the query request.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId"
],
"request": {
"$ref": "QueryRequest"
},
"response": {
"$ref": "QueryResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Runs a BigQuery SQL query synchronously and returns query results if the\nquery completes within a specified timeout."
}
}
},
"projects": {
"methods": {
"list": {
"id": "bigquery.projects.list",
"path": "bigquery/v2/projects",
"flatPath": "bigquery/v2/projects",
"httpMethod": "GET",
"parameters": {
"maxResults": {
"description": "Maximum number of results to read.\nA wrapper is used here because behavior for maxResults 0 differs from the\nbehavior for maxResults unset.",
"location": "query",
"type": "integer",
"format": "uint32"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of\nresults.",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
],
"response": {
"$ref": "ProjectList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "RPC to list projects to which the user has been granted any project role.\n\nUsers of this method are encouraged to consider the\n[Resource Manager](https:\/\/cloud.google.com\/resource-manager\/docs\/) API,\nwhich provides the underlying data for this method and has more\ncapabilities."
},
"getServiceAccount": {
"id": "bigquery.projects.getServiceAccount",
"path": "bigquery/v2/projects/{+projectId}/serviceAccount",
"flatPath": "bigquery/v2/projects/{projectsId}/serviceAccount",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. ID of the project.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId"
],
"response": {
"$ref": "GetServiceAccountResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "RPC to get the service account for a project used for interactions with\nGoogle Cloud KMS"
}
}
},
"routines": {
"methods": {
"get": {
"id": "bigquery.routines.get",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the requested routine",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the requested routine",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"routineId": {
"description": "Required. Routine ID of the requested routine",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"readMask": {
"description": "If set, only the Routine fields in the field mask are returned in the\nresponse. If unset, all Routine fields are returned.",
"location": "query",
"type": "string",
"format": "google-fieldmask"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Gets the specified routine resource by routine ID."
},
"insert": {
"id": "bigquery.routines.insert",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/routines",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/routines",
"httpMethod": "POST",
"parameters": {
"projectId": {
"description": "Required. Project ID of the new routine",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the new routine",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"request": {
"$ref": "Routine"
},
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Creates a new routine in the dataset."
},
"update": {
"id": "bigquery.routines.update",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "PUT",
"parameters": {
"projectId": {
"description": "Required. Project ID of the routine to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the routine to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"routineId": {
"description": "Required. Routine ID of the routine to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"request": {
"$ref": "Routine"
},
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Updates information in an existing routine. The update method replaces the\nentire Routine resource."
},
"delete": {
"id": "bigquery.routines.delete",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "DELETE",
"parameters": {
"projectId": {
"description": "Required. Project ID of the routine to delete",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the routine to delete",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"routineId": {
"description": "Required. Routine ID of the routine to delete",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Deletes the routine specified by routineId from the dataset."
},
"list": {
"id": "bigquery.routines.list",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/routines",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/routines",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the routines to list",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the routines to list",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"maxResults": {
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"location": "query",
"type": "integer",
"format": "uint32"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of\nresults",
"location": "query",
"type": "string"
},
"readMask": {
"description": "If set, then only the Routine fields in the field mask, as well as\nproject_id, dataset_id and routine_id, are returned in the response.\nIf unset, then the following Routine fields are returned:\netag, project_id, dataset_id, routine_id, routine_type, creation_time,\nlast_modified_time, and language.",
"location": "query",
"type": "string",
"format": "google-fieldmask"
},
"filter": {
"description": "If set, then only the Routines matching this filter are returned.\nThe current supported form is either \"routine_type:<RoutineType>\" or\n\"routineType:<RoutineType>\", where <RoutineType> is a RoutineType enum.\nExample: \"routineType:SCALAR_FUNCTION\".",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"response": {
"$ref": "ListRoutinesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Lists all routines in the specified dataset. Requires the READER dataset\nrole."
}
}
},
"tables": {
"methods": {
"get": {
"id": "bigquery.tables.get",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/tables/{+tableId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the requested table",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the requested table",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"tableId": {
"description": "Required. Table ID of the requested table",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"selectedFields": {
"description": "List of table schema fields to return (comma-separated).\nIf unspecified, all fields are returned.\nA fieldMask cannot be used here because the fields will automatically be\nconverted from camelCase to snake_case and the conversion will fail if\nthere are underscores. Since these are fields in BigQuery table schemas,\nunderscores are allowed.",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"response": {
"$ref": "Table"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Gets the specified table resource by table ID.\nThis method does not return the data in the table, it only returns the\ntable resource, which describes the structure of this table."
},
"insert": {
"id": "bigquery.tables.insert",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/tables",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables",
"httpMethod": "POST",
"parameters": {
"projectId": {
"description": "Required. Project ID of the new table",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the new table",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"request": {
"$ref": "Table"
},
"response": {
"$ref": "Table"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Creates a new, empty table in the dataset."
},
"patch": {
"id": "bigquery.tables.patch",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/tables/{+tableId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}",
"httpMethod": "PATCH",
"parameters": {
"projectId": {
"description": "Required. Project ID of the table to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the table to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"tableId": {
"description": "Required. Table ID of the table to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"request": {
"$ref": "Table"
},
"response": {
"$ref": "Table"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Updates information in an existing table. The update method replaces the\nentire table resource, whereas the patch method only replaces fields that\nare provided in the submitted table resource.\nThis method supports RFC5789 patch semantics."
},
"update": {
"id": "bigquery.tables.update",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/tables/{+tableId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}",
"httpMethod": "PUT",
"parameters": {
"projectId": {
"description": "Required. Project ID of the table to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the table to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"tableId": {
"description": "Required. Table ID of the table to update",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"request": {
"$ref": "Table"
},
"response": {
"$ref": "Table"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Updates information in an existing table. The update method replaces the\nentire Table resource, whereas the patch method only replaces fields that\nare provided in the submitted Table resource."
},
"delete": {
"id": "bigquery.tables.delete",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/tables/{+tableId}",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}",
"httpMethod": "DELETE",
"parameters": {
"projectId": {
"description": "Required. Project ID of the table to delete",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the table to delete",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"tableId": {
"description": "Required. Table ID of the table to delete",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Deletes the table specified by tableId from the dataset.\nIf the table contains data, all the data will be deleted."
},
"list": {
"id": "bigquery.tables.list",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/tables",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project ID of the tables to list",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset ID of the tables to list",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"maxResults": {
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"location": "query",
"type": "integer",
"format": "uint32"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of\nresults",
"location": "query",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId"
],
"response": {
"$ref": "TableList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Lists all tables in the specified dataset. Requires the READER dataset\nrole."
},
"setIamPolicy": {
"id": "bigquery.tables.setIamPolicy",
"path": "bigquery/v2/{+resource}:setIamPolicy",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:setIamPolicy",
"httpMethod": "POST",
"parameters": {
"resource": {
"description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.",
"location": "path",
"required": true,
"pattern": "^projects\/[^\/]+\/datasets\/[^\/]+\/tables\/[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"resource"
],
"request": {
"$ref": "SetIamPolicyRequest"
},
"response": {
"$ref": "Policy"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors."
},
"getIamPolicy": {
"id": "bigquery.tables.getIamPolicy",
"path": "bigquery/v2/{+resource}:getIamPolicy",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:getIamPolicy",
"httpMethod": "POST",
"parameters": {
"resource": {
"description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.",
"location": "path",
"required": true,
"pattern": "^projects\/[^\/]+\/datasets\/[^\/]+\/tables\/[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"resource"
],
"request": {
"$ref": "GetIamPolicyRequest"
},
"response": {
"$ref": "Policy"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset."
},
"testIamPermissions": {
"id": "bigquery.tables.testIamPermissions",
"path": "bigquery/v2/{+resource}:testIamPermissions",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}:testIamPermissions",
"httpMethod": "POST",
"parameters": {
"resource": {
"description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.",
"location": "path",
"required": true,
"pattern": "^projects\/[^\/]+\/datasets\/[^\/]+\/tables\/[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"resource"
],
"request": {
"$ref": "TestIamPermissionsRequest"
},
"response": {
"$ref": "TestIamPermissionsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning."
}
}
},
"tabledata": {
"methods": {
"insertAll": {
"id": "bigquery.tabledata.insertAll",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/tables/{+tableId}/insertAll",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}/insertAll",
"httpMethod": "POST",
"parameters": {
"projectId": {
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"tableId": {
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"request": {
"$ref": "TableDataInsertAllRequest"
},
"response": {
"$ref": "TableDataInsertAllResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.insertdata",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Streams data into BigQuery one record at a time without needing to run a\nload job."
},
"list": {
"id": "bigquery.tabledata.list",
"path": "bigquery/v2/projects/{+projectId}/datasets/{+datasetId}/tables/{+tableId}/data",
"flatPath": "bigquery/v2/projects/{projectsId}/datasets/{datasetsId}/tables/{tablesId}/data",
"httpMethod": "GET",
"parameters": {
"projectId": {
"description": "Required. Project id of the table to list.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"datasetId": {
"description": "Required. Dataset id of the table to list.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"tableId": {
"description": "Required. Table id of the table to list.",
"location": "path",
"required": true,
"pattern": "^[^\/]+$",
"type": "string"
},
"startIndex": {
"description": "Start row index of the table.",
"location": "query",
"type": "string",
"format": "uint64"
},
"maxResults": {
"description": "Row limit of the table.",
"location": "query",
"type": "integer",
"format": "uint32"
},
"pageToken": {
"description": "Page token of the request. When this token is non-empty, it\nindicates additional results are available.",
"location": "query",
"type": "string"
},
"selectedFields": {
"description": "Subset of fields to return, supports select into sub fields.\nExample: selected_fields = \"a,e.d.f\";",
"location": "query",
"type": "string"
},
"formatOptions.useInt64Timestamp": {
"description": "Optional. Output timestamp as usec int64. Default is false.",
"location": "query",
"type": "boolean"
}
},
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"response": {
"$ref": "TableDataList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
],
"description": "List the content of a table in rows."
}
}
}
},
"basePath": ""
}