Commit a7e440a

style: fix linting
1 parent fda6e7b commit a7e440a

4 files changed: 21 additions & 20 deletions

src/dve/core_engine/backends/implementations/duckdb/readers/json.py

Lines changed: 6 additions & 5 deletions
@@ -1,7 +1,8 @@
 """A csv reader to create duckdb relations"""
 
 # pylint: disable=arguments-differ
-from typing import Any, Dict, Iterator, Optional, Type
+from collections.abc import Iterator
+from typing import Any, Optional
 
 from duckdb import DuckDBPyRelation, read_json
 from pydantic import BaseModel
@@ -25,18 +26,18 @@ def __init__(self, json_format: Optional[str] = "array"):
         super().__init__()
 
     def read_to_py_iterator(
-        self, resource: URI, entity_name: EntityName, schema: Type[BaseModel]
-    ) -> Iterator[Dict[str, Any]]:
+        self, resource: URI, entity_name: EntityName, schema: type[BaseModel]
+    ) -> Iterator[dict[str, Any]]:
         """Creates an iterable object of rows as dictionaries"""
         return self.read_to_relation(resource, entity_name, schema).pl().iter_rows(named=True)
 
     @read_function(DuckDBPyRelation)
     def read_to_relation(  # pylint: disable=unused-argument
-        self, resource: URI, entity_name: EntityName, schema: Type[BaseModel]
+        self, resource: URI, entity_name: EntityName, schema: type[BaseModel]
     ) -> DuckDBPyRelation:
         """Returns a relation object from the source json"""
 
-        ddb_schema: Dict[str, SQLType] = {
+        ddb_schema: dict[str, SQLType] = {
             fld.name: str(get_duckdb_type_from_annotation(fld.annotation))  # type: ignore
             for fld in schema.__fields__.values()
         }
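
The change above is the core of the lint fix: the deprecated typing aliases (Dict, Type) give way to the builtin generics available since Python 3.9, and Iterator is imported from collections.abc rather than typing. A minimal sketch of the pattern with illustrative names, not code from this repo:

from collections.abc import Iterator
from typing import Any


def iter_records(records: list[dict[str, Any]]) -> Iterator[dict[str, Any]]:
    """list, dict and type work as generics directly; no typing aliases needed."""
    yield from records


def field_names(model: type) -> list[str]:
    # type[...] replaces typing.Type[...] for annotating class objects.
    return list(getattr(model, "__annotations__", {}))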

src/dve/core_engine/backends/implementations/spark/readers/csv.py

Lines changed: 7 additions & 7 deletions
@@ -1,7 +1,7 @@
 """A reader implementation using the Databricks Spark CSV reader."""
 
-
-from typing import Any, Dict, Iterator, Type
+from collections.abc import Iterator
+from typing import Any, Optional
 
 from pydantic import BaseModel
 from pyspark.sql import DataFrame, SparkSession
@@ -30,7 +30,7 @@ def __init__(
         header: bool = True,
         multi_line: bool = False,
         encoding: str = "utf-8-sig",
-        spark_session: SparkSession = None,
+        spark_session: Optional[SparkSession] = None,
     ) -> None:
 
         self.delimiter = delimiter
@@ -39,13 +39,13 @@ def __init__(
         self.quote_char = quote_char
         self.header = header
         self.multi_line = multi_line
-        self.spark_session = spark_session if spark_session else SparkSession.builder.getOrCreate()
+        self.spark_session = spark_session if spark_session else SparkSession.builder.getOrCreate()  # type: ignore # pylint: disable=C0301
 
         super().__init__()
 
     def read_to_py_iterator(
-        self, resource: URI, entity_name: EntityName, schema: Type[BaseModel]
-    ) -> Iterator[Dict[URI, Any]]:
+        self, resource: URI, entity_name: EntityName, schema: type[BaseModel]
+    ) -> Iterator[dict[URI, Any]]:
         df = self.read_to_dataframe(resource, entity_name, schema)
         yield from (record.asDict(True) for record in df.toLocalIterator())
 
@@ -54,7 +54,7 @@ def read_to_dataframe(
         self,
         resource: URI,
         entity_name: EntityName,  # pylint: disable=unused-argument
-        schema: Type[BaseModel],
+        schema: type[BaseModel],
     ) -> DataFrame:
         """Read a CSV file directly to a Spark DataFrame."""
         if get_content_length(resource) == 0:
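
This hunk also fixes an implicit-Optional annotation: spark_session: SparkSession = None is no longer accepted, because mypy stopped inferring Optional from a None default (no_implicit_optional became the default in mypy 0.990), so the None case must be spelled out. A sketch of the fallback pattern, assuming a working pyspark install; the function name is illustrative:

from typing import Optional

from pyspark.sql import SparkSession


def resolve_session(spark_session: Optional[SparkSession] = None) -> SparkSession:
    # A None default requires Optional[...]; fall back to the shared session.
    return spark_session if spark_session else SparkSession.builder.getOrCreate()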

src/dve/core_engine/backends/implementations/spark/readers/json.py

Lines changed: 6 additions & 6 deletions
@@ -1,7 +1,7 @@
 """A reader implementation using the Databricks Spark JSON reader."""
 
-
-from typing import Any, Dict, Iterator, Optional, Type
+from collections.abc import Iterator
+from typing import Any, Optional
 
 from pydantic import BaseModel
 from pyspark.sql import DataFrame, SparkSession
@@ -31,13 +31,13 @@ def __init__(
 
         self.encoding = encoding
         self.multi_line = multi_line
-        self.spark_session = spark_session if spark_session else SparkSession.builder.getOrCreate()
+        self.spark_session = spark_session if spark_session else SparkSession.builder.getOrCreate()  # type: ignore # pylint: disable=C0301
 
         super().__init__()
 
     def read_to_py_iterator(
-        self, resource: URI, entity_name: EntityName, schema: Type[BaseModel]
-    ) -> Iterator[Dict[URI, Any]]:
+        self, resource: URI, entity_name: EntityName, schema: type[BaseModel]
+    ) -> Iterator[dict[URI, Any]]:
         df = self.read_to_dataframe(resource, entity_name, schema)
         yield from (record.asDict(True) for record in df.toLocalIterator())
 
@@ -46,7 +46,7 @@ def read_to_dataframe(
         self,
         resource: URI,
         entity_name: EntityName,  # pylint: disable=unused-argument
-        schema: Type[BaseModel],
+        schema: type[BaseModel],
     ) -> DataFrame:
         """Read a JSON file directly to a Spark DataFrame."""
         if get_content_length(resource) == 0:
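
The JSON reader receives the same session fallback, with both suppressions stacked in a single trailing comment: mypy honours the leading # type: ignore, and pylint scans the rest of the comment for its own disable pragma, so one comment satisfies both tools. A hypothetical one-liner showing the layout (the C0301 line-too-long disable is copied from the diff for illustration):

from pyspark.sql import SparkSession

# mypy reads the leading pragma; pylint finds its disable directive after it.
session = SparkSession.builder.getOrCreate()  # type: ignore # pylint: disable=C0301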

src/dve/pipeline/pipeline.py

Lines changed: 2 additions & 2 deletions
@@ -532,9 +532,9 @@ def apply_business_rules(self, submission_info: SubmissionInfo, failed: bool):
                 entity_name,
             ),
         )
-        entity_manager.entities[entity_name] = self.step_implementations.read_parquet(
+        entity_manager.entities[entity_name] = self.step_implementations.read_parquet(  # type: ignore
             projected
-        )  # type: ignore
+        )
 
         status = SubmissionStatus(
             failed=failed,
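
The pipeline change only relocates a pragma: mypy ties # type: ignore to the line on which it reports the error, and for a call spanning several lines that is the opening line, so a pragma left on the closing parenthesis suppresses nothing. A minimal sketch with a hypothetical untyped reader:

def read_parquet(path):  # deliberately left untyped for the example
    return path


# The pragma belongs on the opening line of the multi-line call:
frame = read_parquet(  # type: ignore
    "projected.parquet"
)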
