```diff
@@ -22,11 +22,9 @@
 import datetime
 from decimal import Decimal
 import random
-import operator
 import uuid
 
 from google import auth
-import google.api_core.exceptions
 from google.cloud.bigquery import dbapi
 from google.cloud.bigquery.table import (
     RangePartitioning,
```
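Both removed imports are explained by the final hunk below: `operator` loses its only use when `operator.attrgetter("table_id")` goes away, and the fully qualified `google.api_core.exceptions.NotFound` reference is replaced by a bare `NotFound`. The latter presumably pairs with a direct import elsewhere in the file, not visible in this excerpt, along the lines of:

```python
# Assumed companion change, not shown in this excerpt:
from google.api_core.exceptions import NotFound
```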
```diff
@@ -1047,11 +1045,6 @@ def dbapi(cls):
     def import_dbapi(cls):
         return dbapi
 
-    @staticmethod
-    def _build_formatted_table_id(table):
-        """Build '<dataset_id>.<table_id>' string using given table."""
-        return "{}.{}".format(table.reference.dataset_id, table.table_id)
-
     @staticmethod
     def _add_default_dataset_to_job_config(job_config, project_id, dataset_id):
         # If dataset_id is set, then we know the job_config isn't None
```
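Once `_get_table_or_view_names` stops enumerating every dataset (next hunk), `_build_formatted_table_id` has no remaining callers: results are always bare `table_id` values scoped to a single schema, never dataset-qualified names. For reference, a minimal sketch of what the removed helper produced, using hypothetical dataset and table IDs:

```python
# Illustrative only: the removed helper joined dataset_id and table_id,
# e.g. a table "orders" in dataset "sales" came back qualified.
formatted = "{}.{}".format("sales", "orders")
assert formatted == "sales.orders"  # "<dataset_id>.<table_id>"
```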
```diff
@@ -1100,36 +1093,34 @@ def create_connect_args(self, url):
             )
         return ([], {"client": client})
 
-    def _get_table_or_view_names(self, connection, item_types, schema=None):
-        current_schema = schema or self.dataset_id
-        get_table_name = (
-            self._build_formatted_table_id
-            if self.dataset_id is None
-            else operator.attrgetter("table_id")
-        )
+    def _get_default_schema_name(self, connection) -> str:
+        return connection.dialect.dataset_id
 
+    def _get_table_or_view_names(self, connection, item_types, schema=None):
         client = connection.connection._client
-        datasets = client.list_datasets()
-
-        result = []
-        for dataset in datasets:
-            if current_schema is not None and current_schema != dataset.dataset_id:
-                continue
-
-            try:
-                tables = client.list_tables(
-                    dataset.reference, page_size=self.list_tables_page_size
+        # `schema=None` means to search the default schema. If one isn't set in the
+        # connection string, then we have nothing to search so return an empty list.
+        #
+        # When using Alembic with `include_schemas=False`, it expects to compare to a
+        # single schema. If `include_schemas=True`, it will enumerate all schemas and
+        # then call `get_table_names`/`get_view_names` for each schema.
+        current_schema = schema or self.default_schema_name
+        if current_schema is None:
+            return []
+        try:
+            return [
+                table.table_id
+                for table in client.list_tables(
+                    current_schema, page_size=self.list_tables_page_size
                 )
-                for table in tables:
-                    if table.table_type in item_types:
-                        result.append(get_table_name(table))
-            except google.api_core.exceptions.NotFound:
-                # It's possible that the dataset was deleted between when we
-                # fetched the list of datasets and when we try to list the
-                # tables from it. See:
-                # https://github.com/googleapis/python-bigquery-sqlalchemy/issues/105
-                pass
-        return result
+                if table.table_type in item_types
+            ]
+        except NotFound:
+            # It's possible that the dataset was deleted between when we
+            # fetched the list of datasets and when we try to list the
+            # tables from it. See:
+            # https://github.com/googleapis/python-bigquery-sqlalchemy/issues/105
+            pass
 
     @staticmethod
     def _split_table_name(full_table_name):
```
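`_get_default_schema_name` is the standard SQLAlchemy dialect hook: `DefaultDialect.initialize()` calls it once per engine (when the dialect defines it) and caches the result as `dialect.default_schema_name`, which is the value `schema or self.default_schema_name` consults above. A minimal usage sketch, assuming the standard `bigquery://<project>/<dataset>` URL form with placeholder project and dataset names:

```python
from sqlalchemy import create_engine, inspect

# With a default dataset in the URL, it becomes the dialect's default schema.
engine = create_engine("bigquery://some-project/some_dataset")
insp = inspect(engine)

print(insp.get_table_names())                        # tables in some_dataset
print(insp.get_table_names(schema="other_dataset"))  # explicit schema still works

# Without a dataset in the URL there is no default schema, so the rewritten
# method returns [] instead of scanning every dataset in the project.
no_default = inspect(create_engine("bigquery://some-project"))
print(no_default.get_table_names())                  # []
```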