#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** Type: MMv1 ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_bigquery_table
description:
- A Table that belongs to a Dataset.
short_description: Creates a GCP Table
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
table_reference:
description:
- Reference describing the ID of this table.
required: false
type: dict
suboptions:
dataset_id:
description:
- The ID of the dataset containing this table.
required: false
type: str
project_id:
description:
- The ID of the project containing this table.
required: false
type: str
table_id:
description:
- The ID of the table.
required: false
type: str
clustering:
description:
- One or more fields on which data should be clustered. Only top-level, non-repeated,
simple-type fields are supported. When you cluster a table using multiple columns,
the order of columns you specify is important. The order of the specified columns
determines the sort order of the data.
elements: str
required: false
type: list
description:
description:
- A user-friendly description of the dataset.
required: false
type: str
friendly_name:
description:
- A descriptive name for this table.
required: false
type: str
labels:
description:
- The labels associated with this dataset. You can use these to organize and group
your datasets.
required: false
type: dict
name:
description:
- Name of the table.
required: false
type: str
num_rows:
description:
- The number of rows of data in this table, excluding any data in the streaming
buffer.
required: false
type: int
view:
description:
- The view definition.
required: false
type: dict
suboptions:
use_legacy_sql:
description:
- Specifies whether to use BigQuery's legacy SQL for this view.
required: false
type: bool
user_defined_function_resources:
description:
- Describes user-defined function resources used in the query.
elements: dict
required: false
type: list
suboptions:
inline_code:
description:
- An inline resource that contains code for a user-defined function (UDF).
Providing an inline code resource is equivalent to providing a URI for
a file containing the same code.
required: false
type: str
resource_uri:
description:
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
required: false
type: str
time_partitioning:
description:
- If specified, configures time-based partitioning for this table.
required: false
type: dict
suboptions:
expiration_ms:
description:
- Number of milliseconds for which to keep the storage for a partition.
required: false
type: int
field:
description:
- If not set, the table is partitioned by pseudo column, referenced via either
'_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If
field is specified, the table is instead partitioned by this field. The
field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE
or REQUIRED.
required: false
type: str
type:
description:
- The only type supported is DAY, which will generate one partition per day.
- 'Some valid choices include: "DAY"'
required: false
type: str
schema:
description:
- Describes the schema of this table.
required: false
type: dict
suboptions:
fields:
description:
- Describes the fields in a table.
elements: dict
required: false
type: list
suboptions:
description:
description:
- The field description. The maximum length is 1,024 characters.
required: false
type: str
fields:
description:
- Describes the nested schema fields if the type property is set to RECORD.
elements: str
required: false
type: list
mode:
description:
- The field mode.
- 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"'
required: false
type: str
name:
description:
- The field name.
required: false
type: str
type:
description:
- The field data type.
- 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT",
"TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"'
required: false
type: str
encryption_configuration:
description:
- Custom encryption configuration.
required: false
type: dict
suboptions:
kms_key_name:
description:
- Describes the Cloud KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your project
requires access to this encryption key.
required: false
type: str
expiration_time:
description:
- The time when this table expires, in milliseconds since the epoch. If not present,
the table will persist indefinitely.
required: false
type: int
external_data_configuration:
description:
- Describes the data format, location, and other properties of a table stored
outside of BigQuery. By defining these properties, the data source can then
be queried as if it were a standard BigQuery table.
required: false
type: dict
suboptions:
autodetect:
description:
- Try to detect schema and format options automatically. Any option specified
explicitly will be honored.
required: false
type: bool
compression:
description:
- The compression type of the data source.
- 'Some valid choices include: "GZIP", "NONE"'
required: false
type: str
ignore_unknown_values:
description:
- Indicates if BigQuery should allow extra values that are not represented
in the table schema.
required: false
type: bool
max_bad_records:
description:
- The maximum number of bad records that BigQuery can ignore when reading
data.
required: false
type: int
source_format:
description:
- The data format.
- 'Some valid choices include: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON",
"AVRO", "DATASTORE_BACKUP", "BIGTABLE", "ORC"'
required: false
type: str
source_uris:
description:
- The fully-qualified URIs that point to your data in Google Cloud.
- 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard
character and it must come after the ''bucket'' name. Size limits related
to load jobs apply to external data sources. For Google Cloud Bigtable URIs:
Exactly one URI can be specified and it has to be a fully specified and valid
HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore
backups, exactly one URI can be specified. Also, the ''*'' wildcard character
is not allowed.'
elements: str
required: false
type: list
schema:
description:
- The schema for the data. Schema is required for CSV and JSON formats.
required: false
type: dict
suboptions:
fields:
description:
- Describes the fields in a table.
elements: dict
required: false
type: list
suboptions:
description:
description:
- The field description.
required: false
type: str
fields:
description:
- Describes the nested schema fields if the type property is set to
RECORD.
elements: str
required: false
type: list
mode:
description:
- Field mode.
- 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"'
required: false
type: str
name:
description:
- Field name.
required: false
type: str
type:
description:
- Field data type.
- 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT",
"TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"'
required: false
type: str
google_sheets_options:
description:
- Additional options if sourceFormat is set to GOOGLE_SHEETS.
required: false
type: dict
suboptions:
skip_leading_rows:
description:
- The number of rows at the top of a Google Sheet that BigQuery will skip
when reading the data.
required: false
type: int
csv_options:
description:
- Additional properties to set if sourceFormat is set to CSV.
required: false
type: dict
suboptions:
allow_jagged_rows:
description:
- Indicates if BigQuery should accept rows that are missing trailing optional
columns.
required: false
type: bool
allow_quoted_newlines:
description:
- Indicates if BigQuery should allow quoted data sections that contain
newline characters in a CSV file.
required: false
type: bool
encoding:
description:
- The character encoding of the data.
- 'Some valid choices include: "UTF-8", "ISO-8859-1"'
required: false
type: str
field_delimiter:
description:
- The separator for fields in a CSV file.
required: false
type: str
quote:
description:
- The value that is used to quote data sections in a CSV file.
required: false
type: str
skip_leading_rows:
description:
- The number of rows at the top of a CSV file that BigQuery will skip
when reading the data.
required: false
type: int
bigtable_options:
description:
- Additional options if sourceFormat is set to BIGTABLE.
required: false
type: dict
suboptions:
ignore_unspecified_column_families:
description:
- If field is true, then the column families that are not specified in
columnFamilies list are not exposed in the table schema.
required: false
type: bool
read_rowkey_as_string:
description:
- If field is true, then the rowkey column families will be read and converted
to string.
required: false
type: bool
column_families:
description:
- List of column families to expose in the table schema along with their
types.
elements: dict
required: false
type: list
suboptions:
columns:
description:
- Lists of columns that should be exposed as individual fields as
opposed to a list of (column name, value) pairs.
elements: dict
required: false
type: list
suboptions:
encoding:
description:
- The encoding of the values when the type is not STRING.
- 'Some valid choices include: "TEXT", "BINARY"'
required: false
type: str
field_name:
description:
- If the qualifier is not a valid BigQuery field identifier, a
valid identifier must be provided as the column field name and
is used as field name in queries.
required: false
type: str
only_read_latest:
description:
- If this is set, only the latest version of the value in this column
is exposed.
required: false
type: bool
qualifier_string:
description:
- Qualifier of the column.
required: true
type: str
type:
description:
- The type to convert the value in cells of this column.
- 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT",
"BOOLEAN"'
required: false
type: str
encoding:
description:
- The encoding of the values when the type is not STRING.
- 'Some valid choices include: "TEXT", "BINARY"'
required: false
type: str
family_id:
description:
- Identifier of the column family.
required: false
type: str
only_read_latest:
description:
- If this is set, only the latest version of the value is exposed for
all columns in this column family.
required: false
type: bool
type:
description:
- The type to convert the value in cells of this column family.
- 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT",
"BOOLEAN"'
required: false
type: str
dataset:
description:
- Name of the dataset.
required: false
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
- accesstoken
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
access_token:
description:
- An OAuth2 access token if credential type is accesstoken.
type: str
scopes:
description:
- Array of scopes to be used
type: list
elements: str
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
'''
EXAMPLES = '''
- name: create a dataset
google.cloud.gcp_bigquery_dataset:
name: example_dataset
dataset_reference:
dataset_id: example_dataset
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: dataset
- name: create a table
google.cloud.gcp_bigquery_table:
name: example_table
dataset: example_dataset
table_reference:
dataset_id: example_dataset
project_id: test_project
table_id: example_table
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
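# A hedged sketch of a day-partitioned table with an explicit schema, using the
# schema and time_partitioning options documented above. The dataset, table, and
# field names here (example_dataset, example_partitioned_table, event_time,
# payload) are illustrative placeholders, not values required by the module.
- name: create a day-partitioned table with an explicit schema
  google.cloud.gcp_bigquery_table:
    name: example_partitioned_table
    dataset: example_dataset
    table_reference:
      dataset_id: example_dataset
      project_id: test_project
      table_id: example_partitioned_table
    schema:
      fields:
      - name: event_time
        type: TIMESTAMP
        mode: REQUIRED
      - name: payload
        type: STRING
        mode: NULLABLE
    time_partitioning:
      type: DAY
      field: event_time
    project: test_project
    auth_kind: serviceaccount
    service_account_file: "/tmp/auth.pem"
    state: present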
'''
RETURN = '''
tableReference:
description:
- Reference describing the ID of this table.
returned: success
type: complex
contains:
datasetId:
description:
- The ID of the dataset containing this table.
returned: success
type: str
projectId:
description:
- The ID of the project containing this table.
returned: success
type: str
tableId:
description:
- The ID of the table.
returned: success
type: str
clustering:
description:
- One or more fields on which data should be clustered. Only top-level, non-repeated,
simple-type fields are supported. When you cluster a table using multiple columns,
the order of columns you specify is important. The order of the specified columns
determines the sort order of the data.
returned: success
type: list
creationTime:
description:
- The time when this dataset was created, in milliseconds since the epoch.
returned: success
type: int
description:
description:
- A user-friendly description of the dataset.
returned: success
type: str
friendlyName:
description:
- A descriptive name for this table.
returned: success
type: str
id:
description:
- An opaque ID uniquely identifying the table.
returned: success
type: str
labels:
description:
- The labels associated with this dataset. You can use these to organize and group
your datasets.
returned: success
type: dict
lastModifiedTime:
description:
- The time when this table was last modified, in milliseconds since the epoch.
returned: success
type: int
location:
description:
- The geographic location where the table resides. This value is inherited from
the dataset.
returned: success
type: str
name:
description:
- Name of the table.
returned: success
type: str
numBytes:
description:
- The size of this table in bytes, excluding any data in the streaming buffer.
returned: success
type: int
numLongTermBytes:
description:
- The number of bytes in the table that are considered "long-term storage".
returned: success
type: int
numRows:
description:
- The number of rows of data in this table, excluding any data in the streaming
buffer.
returned: success
type: int
requirePartitionFilter:
description:
- If set to true, queries over this table require a partition filter that can be
used for partition elimination to be specified.
returned: success
type: bool
type:
description:
- Describes the table type.
returned: success
type: str
view:
description:
- The view definition.
returned: success
type: complex
contains:
useLegacySql:
description:
- Specifies whether to use BigQuery's legacy SQL for this view.
returned: success
type: bool
userDefinedFunctionResources:
description:
- Describes user-defined function resources used in the query.
returned: success
type: complex
contains:
inlineCode:
description:
- An inline resource that contains code for a user-defined function (UDF).
Providing an inline code resource is equivalent to providing a URI for
a file containing the same code.
returned: success
type: str
resourceUri:
description:
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
returned: success
type: str
timePartitioning:
description:
- If specified, configures time-based partitioning for this table.
returned: success
type: complex
contains:
expirationMs:
description:
- Number of milliseconds for which to keep the storage for a partition.
returned: success
type: int
field:
description:
- If not set, the table is partitioned by pseudo column, referenced via either
'_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If field
is specified, the table is instead partitioned by this field. The field must
be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.
returned: success
type: str
type:
description:
- The only type supported is DAY, which will generate one partition per day.
returned: success
type: str
streamingBuffer:
description:
- Contains information regarding this table's streaming buffer, if one is present.
This field will be absent if the table is not being streamed to or if there is
no data in the streaming buffer.
returned: success
type: complex
contains:
estimatedBytes:
description:
- A lower-bound estimate of the number of bytes currently in the streaming buffer.
returned: success
type: int
estimatedRows:
description:
- A lower-bound estimate of the number of rows currently in the streaming buffer.
returned: success
type: int
oldestEntryTime:
description:
- Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds
since the epoch, if the streaming buffer is available.
returned: success
type: int
schema:
description:
- Describes the schema of this table.
returned: success
type: complex
contains:
fields:
description:
- Describes the fields in a table.
returned: success
type: complex
contains:
description:
description:
- The field description. The maximum length is 1,024 characters.
returned: success
type: str
fields:
description:
- Describes the nested schema fields if the type property is set to RECORD.
returned: success
type: list
mode:
description:
- The field mode.
returned: success
type: str
name:
description:
- The field name.
returned: success
type: str
type:
description:
- The field data type.
returned: success
type: str
encryptionConfiguration:
description:
- Custom encryption configuration.
returned: success
type: complex
contains:
kmsKeyName:
description:
- Describes the Cloud KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your project
requires access to this encryption key.
returned: success
type: str
expirationTime:
description:
- The time when this table expires, in milliseconds since the epoch. If not present,
the table will persist indefinitely.
returned: success
type: int
externalDataConfiguration:
description:
- Describes the data format, location, and other properties of a table stored outside
of BigQuery. By defining these properties, the data source can then be queried
as if it were a standard BigQuery table.
returned: success
type: complex
contains:
autodetect:
description:
- Try to detect schema and format options automatically. Any option specified
explicitly will be honored.
returned: success
type: bool
compression:
description:
- The compression type of the data source.
returned: success
type: str
ignoreUnknownValues:
description:
- Indicates if BigQuery should allow extra values that are not represented in
the table schema.
returned: success
type: bool
maxBadRecords:
description:
- The maximum number of bad records that BigQuery can ignore when reading data.
returned: success
type: int
sourceFormat:
description:
- The data format.
returned: success
type: str
sourceUris:
description:
- The fully-qualified URIs that point to your data in Google Cloud.
- 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character
and it must come after the ''bucket'' name. Size limits related to load jobs
apply to external data sources. For Google Cloud Bigtable URIs: Exactly one
URI can be specified and it has to be a fully specified and valid HTTPS URL for
a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly
one URI can be specified. Also, the ''*'' wildcard character is not allowed.'
returned: success
type: list
schema:
description:
- The schema for the data. Schema is required for CSV and JSON formats.
returned: success
type: complex
contains:
fields:
description:
- Describes the fields in a table.
returned: success
type: complex
contains:
description:
description:
- The field description.
returned: success
type: str
fields:
description:
- Describes the nested schema fields if the type property is set to
RECORD.
returned: success
type: list
mode:
description:
- Field mode.
returned: success
type: str
name:
description:
- Field name.
returned: success
type: str
type:
description:
- Field data type.
returned: success
type: str
googleSheetsOptions:
description:
- Additional options if sourceFormat is set to GOOGLE_SHEETS.
returned: success
type: complex
contains:
skipLeadingRows:
description:
- The number of rows at the top of a Google Sheet that BigQuery will skip
when reading the data.
returned: success
type: int
csvOptions:
description:
- Additional properties to set if sourceFormat is set to CSV.
returned: success
type: complex
contains:
allowJaggedRows:
description:
- Indicates if BigQuery should accept rows that are missing trailing optional
columns.
returned: success
type: bool
allowQuotedNewlines:
description:
- Indicates if BigQuery should allow quoted data sections that contain newline
characters in a CSV file.
returned: success
type: bool
encoding:
description:
- The character encoding of the data.
returned: success
type: str
fieldDelimiter:
description:
- The separator for fields in a CSV file.
returned: success
type: str
quote:
description:
- The value that is used to quote data sections in a CSV file.
returned: success
type: str
skipLeadingRows:
description:
- The number of rows at the top of a CSV file that BigQuery will skip when
reading the data.
returned: success
type: int
bigtableOptions:
description:
- Additional options if sourceFormat is set to BIGTABLE.
returned: success
type: complex
contains:
ignoreUnspecifiedColumnFamilies:
description:
- If field is true, then the column families that are not specified in columnFamilies
list are not exposed in the table schema.
returned: success
type: bool
readRowkeyAsString:
description:
- If field is true, then the rowkey column families will be read and converted
to string.
returned: success
type: bool
columnFamilies:
description:
- List of column families to expose in the table schema along with their
types.
returned: success
type: complex
contains:
columns:
description:
- Lists of columns that should be exposed as individual fields as opposed
to a list of (column name, value) pairs.
returned: success
type: complex
contains:
encoding:
description:
- The encoding of the values when the type is not STRING.
returned: success
type: str
fieldName:
description:
- If the qualifier is not a valid BigQuery field identifier, a valid
identifier must be provided as the column field name and is used
as field name in queries.
returned: success
type: str
onlyReadLatest:
description:
- If this is set, only the latest version of the value in this column
is exposed.
returned: success
type: bool
qualifierString:
description:
- Qualifier of the column.
returned: success
type: str
type:
description:
- The type to convert the value in cells of this column.
returned: success
type: str
encoding:
description:
- The encoding of the values when the type is not STRING.
returned: success
type: str
familyId:
description:
- Identifier of the column family.
returned: success
type: str
onlyReadLatest:
description:
- If this is set, only the latest version of the value is exposed for all
columns in this column family.
returned: success
type: bool
type:
description:
- The type to convert the value in cells of this column family.
returned: success
type: str
dataset:
description:
- Name of the dataset.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
navigate_hash,
GcpSession,
GcpModule,
GcpRequest,
remove_nones_from_dict,
)
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
table_reference=dict(type='dict', options=dict(dataset_id=dict(type='str'), project_id=dict(type='str'), table_id=dict(type='str'))),
clustering=dict(type='list', elements='str'),
description=dict(type='str'),
friendly_name=dict(type='str'),
labels=dict(type='dict'),
name=dict(type='str'),
num_rows=dict(type='int'),
view=dict(
type='dict',
options=dict(
use_legacy_sql=dict(type='bool'),
user_defined_function_resources=dict(
type='list', elements='dict', options=dict(inline_code=dict(type='str'), resource_uri=dict(type='str'))
),
),
),
time_partitioning=dict(type='dict', options=dict(expiration_ms=dict(type='int'), field=dict(type='str'), type=dict(type='str'))),
schema=dict(
type='dict',
options=dict(
fields=dict(
type='list',
elements='dict',
options=dict(
description=dict(type='str'),
fields=dict(type='list', elements='str'),
mode=dict(type='str'),
name=dict(type='str'),
type=dict(type='str'),
),
)
),
),
encryption_configuration=dict(type='dict', options=dict(kms_key_name=dict(type='str'))),
expiration_time=dict(type='int'),
external_data_configuration=dict(
type='dict',
options=dict(
autodetect=dict(type='bool'),
compression=dict(type='str'),
ignore_unknown_values=dict(type='bool'),
max_bad_records=dict(default=0, type='int'),
source_format=dict(type='str'),
source_uris=dict(type='list', elements='str'),
schema=dict(
type='dict',
options=dict(
fields=dict(
type='list',
elements='dict',
options=dict(
description=dict(type='str'),
fields=dict(type='list', elements='str'),
mode=dict(type='str'),
name=dict(type='str'),
type=dict(type='str'),
),
)
),
),
google_sheets_options=dict(type='dict', options=dict(skip_leading_rows=dict(default=0, type='int'))),
csv_options=dict(
type='dict',
options=dict(
allow_jagged_rows=dict(type='bool'),
allow_quoted_newlines=dict(type='bool'),
encoding=dict(type='str'),
field_delimiter=dict(type='str'),
quote=dict(type='str'),
skip_leading_rows=dict(default=0, type='int'),
),
),
bigtable_options=dict(
type='dict',
options=dict(
ignore_unspecified_column_families=dict(type='bool'),
read_rowkey_as_string=dict(type='bool'),
column_families=dict(
type='list',
elements='dict',
options=dict(
columns=dict(
type='list',
elements='dict',
options=dict(
encoding=dict(type='str'),
field_name=dict(type='str'),
only_read_latest=dict(type='bool'),
qualifier_string=dict(required=True, type='str'),
type=dict(type='str'),
),
),
encoding=dict(type='str'),
family_id=dict(type='str'),
only_read_latest=dict(type='bool'),
type=dict(type='str'),
),
),
),
),
),
),
dataset=dict(type='str'),
)
)
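# Default to the BigQuery OAuth scope when the caller does not supply scopes explicitly.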
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery']
state = module.params['state']
kind = 'bigquery#table'
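# Fetch the table's current state, then reconcile it with the desired state:
# update or delete an existing table, or create one when state is 'present'
# and nothing was found.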
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
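# Thin wrappers around the BigQuery REST API: create POSTs to the tables
# collection, update PUTs to the table's self link, and delete issues an HTTP
# DELETE against the same URL.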
def create(module, link, kind):
auth = GcpSession(module, 'bigquery')
return return_if_object(module, auth.post(link, resource_to_request(module)), kind)
def update(module, link, kind):
auth = GcpSession(module, 'bigquery')
return return_if_object(module, auth.put(link, resource_to_request(module)), kind)
def delete(module, link, kind):
auth = GcpSession(module, 'bigquery')
return return_if_object(module, auth.delete(link), kind)
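# Build the request body from the module parameters, mapping the snake_case
# Ansible options to the camelCase field names the BigQuery API expects and
# dropping unset values (explicit False values are kept).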
def resource_to_request(module):
request = {
u'kind': 'bigquery#table',
u'tableReference': TableTablereference(module.params.get('table_reference', {}), module).to_request(),
u'clustering': TableClustering(module.params.get('clustering', {}), module).to_request(),
u'description': module.params.get('description'),
u'friendlyName': module.params.get('friendly_name'),
u'labels': module.params.get('labels'),
u'name': module.params.get('name'),
u'numRows': module.params.get('num_rows'),
u'view': TableView(module.params.get('view', {}), module).to_request(),
u'timePartitioning': TableTimepartitioning(module.params.get('time_partitioning', {}), module).to_request(),
u'schema': TableSchema(module.params.get('schema', {}), module).to_request(),
u'encryptionConfiguration': TableEncryptionconfiguration(module.params.get('encryption_configuration', {}), module).to_request(),
u'expirationTime': module.params.get('expiration_time'),
u'externalDataConfiguration': TableExternaldataconfiguration(module.params.get('external_data_configuration', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
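# GET the resource; a 404 is treated as 'not found' (returning None) rather
# than an error, so main() can decide whether the table needs to be created.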
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'bigquery')
return return_if_object(module, auth.get(link), kind, allow_not_found)
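# URL helpers: self_link addresses a single table, collection addresses the
# dataset's table list in the BigQuery v2 REST API.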
def self_link(module):
return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables/{name}".format(**module.params)
def collection(module):
return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
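# Compare only the keys present in both the desired request and the current
# response, so output-only fields never cause a spurious difference.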
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'tableReference': TableTablereference(response.get(u'tableReference', {}), module).from_response(),
u'clustering': TableClustering(response.get(u'clustering', {}), module).from_response(),
u'creationTime': response.get(u'creationTime'),
u'description': response.get(u'description'),
u'friendlyName': response.get(u'friendlyName'),
u'id': response.get(u'id'),
u'labels': response.get(u'labels'),
u'lastModifiedTime': response.get(u'lastModifiedTime'),
u'location': response.get(u'location'),
u'name': response.get(u'name'),
u'numBytes': response.get(u'numBytes'),
u'numLongTermBytes': response.get(u'numLongTermBytes'),
u'numRows': response.get(u'numRows'),
u'requirePartitionFilter': response.get(u'requirePartitionFilter'),
u'type': response.get(u'type'),
u'view': TableView(response.get(u'view', {}), module).from_response(),
u'timePartitioning': TableTimepartitioning(response.get(u'timePartitioning', {}), module).from_response(),
u'streamingBuffer': TableStreamingbuffer(response.get(u'streamingBuffer', {}), module).from_response(),
u'schema': TableSchema(response.get(u'schema', {}), module).from_response(),
u'encryptionConfiguration': TableEncryptionconfiguration(response.get(u'encryptionConfiguration', {}), module).from_response(),
u'expirationTime': response.get(u'expirationTime'),
u'externalDataConfiguration': TableExternaldataconfiguration(response.get(u'externalDataConfiguration', {}), module).from_response(),
}
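# The helper classes below convert each nested property between the module's
# snake_case options and the API's camelCase JSON: to_request builds request
# fragments from module parameters, from_response normalizes API responses for
# comparison.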
class TableTablereference(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'datasetId': self.request.get('dataset_id'), u'projectId': self.request.get('project_id'), u'tableId': self.request.get('table_id')}
)
def from_response(self):
return remove_nones_from_dict(
{u'datasetId': self.request.get(u'datasetId'), u'projectId': self.request.get(u'projectId'), u'tableId': self.request.get(u'tableId')}
)
class TableView(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'useLegacySql': self.request.get('use_legacy_sql'),
u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray(
self.request.get('user_defined_function_resources', []), self.module
).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'useLegacySql': self.request.get(u'useLegacySql'),
u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray(
self.request.get(u'userDefinedFunctionResources', []), self.module
).from_response(),
}
)
class TableUserdefinedfunctionresourcesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'inlineCode': item.get('inline_code'), u'resourceUri': item.get('resource_uri')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'inlineCode': item.get(u'inlineCode'), u'resourceUri': item.get(u'resourceUri')})
class TableTimepartitioning(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'expirationMs': self.request.get('expiration_ms'), u'field': self.request.get('field'), u'type': self.request.get('type')}
)
def from_response(self):
return remove_nones_from_dict(
{u'expirationMs': self.request.get(u'expirationMs'), u'field': self.request.get(u'field'), u'type': self.request.get(u'type')}
)
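# streamingBuffer is output-only, so nothing is ever sent for it in a request;
# both methods return empty dicts.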
class TableStreamingbuffer(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({})
def from_response(self):
return remove_nones_from_dict({})
class TableSchema(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()})
def from_response(self):
return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()})
class TableFieldsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'description': item.get('description'),
u'fields': item.get('fields'),
u'mode': item.get('mode'),
u'name': item.get('name'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'description': item.get(u'description'),
u'fields': item.get(u'fields'),
u'mode': item.get(u'mode'),
u'name': item.get(u'name'),
u'type': item.get(u'type'),
}
)
class TableEncryptionconfiguration(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'kmsKeyName': self.request.get('kms_key_name')})
def from_response(self):
return remove_nones_from_dict({u'kmsKeyName': self.request.get(u'kmsKeyName')})
class TableExternaldataconfiguration(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'autodetect': self.request.get('autodetect'),
u'compression': self.request.get('compression'),
u'ignoreUnknownValues': self.request.get('ignore_unknown_values'),
u'maxBadRecords': self.request.get('max_bad_records'),
u'sourceFormat': self.request.get('source_format'),
u'sourceUris': self.request.get('source_uris'),
u'schema': TableSchema(self.request.get('schema', {}), self.module).to_request(),
u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get('google_sheets_options', {}), self.module).to_request(),
u'csvOptions': TableCsvoptions(self.request.get('csv_options', {}), self.module).to_request(),
u'bigtableOptions': TableBigtableoptions(self.request.get('bigtable_options', {}), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'autodetect': self.request.get(u'autodetect'),
u'compression': self.request.get(u'compression'),
u'ignoreUnknownValues': self.request.get(u'ignoreUnknownValues'),
u'maxBadRecords': self.request.get(u'maxBadRecords'),
u'sourceFormat': self.request.get(u'sourceFormat'),
u'sourceUris': self.request.get(u'sourceUris'),
u'schema': TableSchema(self.request.get(u'schema', {}), self.module).from_response(),
u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get(u'googleSheetsOptions', {}), self.module).from_response(),
u'csvOptions': TableCsvoptions(self.request.get(u'csvOptions', {}), self.module).from_response(),
u'bigtableOptions': TableBigtableoptions(self.request.get(u'bigtableOptions', {}), self.module).from_response(),
}
)
class TableSchema(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()})
def from_response(self):
return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()})
class TableFieldsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'description': item.get('description'),
u'fields': item.get('fields'),
u'mode': item.get('mode'),
u'name': item.get('name'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'description': item.get(u'description'),
u'fields': item.get(u'fields'),
u'mode': item.get(u'mode'),
u'name': item.get(u'name'),
u'type': item.get(u'type'),
}
)
class TableGooglesheetsoptions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'skipLeadingRows': self.request.get('skip_leading_rows')})
def from_response(self):
return remove_nones_from_dict({u'skipLeadingRows': self.request.get(u'skipLeadingRows')})
class TableCsvoptions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'allowJaggedRows': self.request.get('allow_jagged_rows'),
u'allowQuotedNewlines': self.request.get('allow_quoted_newlines'),
u'encoding': self.request.get('encoding'),
u'fieldDelimiter': self.request.get('field_delimiter'),
u'quote': self.request.get('quote'),
u'skipLeadingRows': self.request.get('skip_leading_rows'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'allowJaggedRows': self.request.get(u'allowJaggedRows'),
u'allowQuotedNewlines': self.request.get(u'allowQuotedNewlines'),
u'encoding': self.request.get(u'encoding'),
u'fieldDelimiter': self.request.get(u'fieldDelimiter'),
u'quote': self.request.get(u'quote'),
u'skipLeadingRows': self.request.get(u'skipLeadingRows'),
}
)
class TableBigtableoptions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'ignoreUnspecifiedColumnFamilies': self.request.get('ignore_unspecified_column_families'),
u'readRowkeyAsString': self.request.get('read_rowkey_as_string'),
u'columnFamilies': TableColumnfamiliesArray(self.request.get('column_families', []), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'ignoreUnspecifiedColumnFamilies': self.request.get(u'ignoreUnspecifiedColumnFamilies'),
u'readRowkeyAsString': self.request.get(u'readRowkeyAsString'),
u'columnFamilies': TableColumnfamiliesArray(self.request.get(u'columnFamilies', []), self.module).from_response(),
}
)
class TableColumnfamiliesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'columns': TableColumnsArray(item.get('columns', []), self.module).to_request(),
u'encoding': item.get('encoding'),
u'familyId': item.get('family_id'),
u'onlyReadLatest': item.get('only_read_latest'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'columns': TableColumnsArray(item.get(u'columns', []), self.module).from_response(),
u'encoding': item.get(u'encoding'),
u'familyId': item.get(u'familyId'),
u'onlyReadLatest': item.get(u'onlyReadLatest'),
u'type': item.get(u'type'),
}
)
class TableColumnsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'encoding': item.get('encoding'),
u'fieldName': item.get('field_name'),
u'onlyReadLatest': item.get('only_read_latest'),
u'qualifierString': item.get('qualifier_string'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'encoding': item.get(u'encoding'),
u'fieldName': item.get(u'fieldName'),
u'onlyReadLatest': item.get(u'onlyReadLatest'),
u'qualifierString': item.get(u'qualifierString'),
u'type': item.get(u'type'),
}
)
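# The module accepts clustering as a plain list of field names, while the API
# represents it as an object with a 'fields' key; to_request wraps the list
# accordingly.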
class TableClustering(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({'fields': self.request})
def from_response(self):
return remove_nones_from_dict({'fields': self.request})
if __name__ == '__main__':
main()