diff --git a/cwms/__init__.py b/cwms/__init__.py index cd20e2d9..23162dc5 100644 --- a/cwms/__init__.py +++ b/cwms/__init__.py @@ -1,13 +1,16 @@ from importlib.metadata import PackageNotFoundError, version from cwms.api import * +from cwms.catalog.blobs import * from cwms.catalog.catalog import * +from cwms.catalog.clobs import * from cwms.datafile_imports.shef_critfile_import import * from cwms.forecast.forecast_instance import * from cwms.forecast.forecast_spec import * from cwms.levels.location_levels import * from cwms.levels.specified_levels import * from cwms.locations.gate_changes import * +from cwms.locations.location_groups import * from cwms.locations.physical_locations import * from cwms.outlets.outlets import * from cwms.outlets.virtual_outlets import * @@ -21,6 +24,7 @@ from cwms.timeseries.timerseries_identifier import * from cwms.timeseries.timeseries import * from cwms.timeseries.timeseries_bin import * +from cwms.timeseries.timeseries_group import * from cwms.timeseries.timeseries_profile import * from cwms.timeseries.timeseries_profile_instance import * from cwms.timeseries.timeseries_profile_parser import * diff --git a/cwms/api.py b/cwms/api.py index 17267ffb..31afe02a 100644 --- a/cwms/api.py +++ b/cwms/api.py @@ -1,4 +1,4 @@ -""" Session management and REST functions for CWMS Data API. +"""Session management and REST functions for CWMS Data API. This module provides functions for making REST calls to the CWMS Data API (CDA). These functions should be used internally to interact with the API. The user should not have to diff --git a/cwms/catalog/blobs.py b/cwms/catalog/blobs.py new file mode 100644 index 00000000..3e622697 --- /dev/null +++ b/cwms/catalog/blobs.py @@ -0,0 +1,85 @@ +from typing import Optional + +import cwms.api as api +from cwms.cwms_types import JSON, Data + + +def get_blob(blob_id: str, office_id: str) -> Data: + """Get a single clob. 
+ + Parameters + ---------- + blob_id: string + Specifies the id of the blob + office_id: string + Specifies the office of the blob. + + + Returns + ------- + cwms data type. data.json will return the JSON output and data.df will return a dataframe + """ + + endpoint = f"blobs/{blob_id}" + params = {"office": office_id} + response = api.get(endpoint, params, api_version=1) + return Data(response) + + +def get_blobs( + office_id: Optional[str] = None, + page_size: Optional[int] = 100, + blob_id_like: Optional[str] = None, +) -> Data: + """Get a subset of Blobs + + Parameters + ---------- + office_id: Optional[string] + Specifies the office of the blob. + page_size: Optional[Integer] + How many entries per page returned. Default 100. + blob_id_like: Optional[string] + Posix regular expression matching against the blob id + + Returns + ------- + cwms data type. data.json will return the JSON output and data.df will return a dataframe + """ + + endpoint = "blobs" + params = {"office": office_id, "page-size": page_size, "like": blob_id_like} + + response = api.get(endpoint, params, api_version=1) + return Data(response, selector="blobs") + + +def store_blobs(data: JSON, fail_if_exists: Optional[bool] = True) -> None: + """Create New Blob + + Parameters + ---------- + data: JSON dictionary + JSON containing information of Blob to be stored + { + "office-id": "string", + "id": "string", + "description": "string", + "media-type-id": "string", + "value": "string" + } + fail_if_exists: Boolean + Create will fail if provided ID already exists. 
Default: true + + Returns + ------- + None + """ + + if not isinstance(data, dict): + raise ValueError("Cannot store a Blob without a JSON data dictionary") + + endpoint = "blobs" + params = {"fail-if-exists": fail_if_exists} + + return api.post(endpoint, data, params, api_version=1) diff --git a/cwms/catalog/clobs.py b/cwms/catalog/clobs.py new file mode 100644 index 00000000..16506431 --- /dev/null +++ b/cwms/catalog/clobs.py @@ -0,0 +1,158 @@ +from typing import Optional + +import cwms.api as api +from cwms.cwms_types import JSON, Data + + +def get_clob(clob_id: str, office_id: str, clob_id_query: Optional[str] = None) -> Data: + """Get a single clob. + + Parameters + ---------- + clob_id: string + Specifies the id of the clob + office_id: string + Specifies the office of the clob. + clob_id_query: string + If this query parameter is provided the id path parameter is ignored and the + value of the query parameter is used. Note: this query parameter is necessary + for id's that contain '/' or other special characters. Because of abuse even + properly escaped '/' in url paths are blocked. When using this query parameter + a valid path parameter must still be provided for the request to be properly + routed. If your clob id contains '/' you can't specify the clob-id query + parameter and also specify the id path parameter because firewall and/or server + rules will deny the request even though you are specifying this override. "ignored" + is suggested. + + + Returns + ------- + cwms data type. 
data.json will return the JSON output and data.df will return a dataframe + """ + + endpoint = f"clobs/{clob_id}" + params = { + "office": office_id, + "clob-id-query": clob_id_query, + } + response = api.get(endpoint, params) + return Data(response) + + +def get_clobs( + office_id: Optional[str] = None, + page_size: Optional[int] = 100, + include_values: Optional[bool] = False, + clob_id_like: Optional[str] = None, +) -> Data: + """Get a subset of Clobs + + Parameters + ---------- + office_id: Optional[string] + Specifies the office of the clob. + page_size: Optional[Integer] + How many entries per page returned. Default 100. + include_values: Optional[Boolean] + Do you want the value associated with this particular clob (default: false) + clob_id_like: Optional[string] + Posix regular expression matching against the clob id + + Returns + ------- + cwms data type. data.json will return the JSON output and data.df will return a dataframe + """ + + endpoint = "clobs" + params = { + "office": office_id, + "page-size": page_size, + "include-values": include_values, + "like": clob_id_like, + } + + response = api.get(endpoint, params) + return Data(response, selector="clobs") + + +def delete_clob(clob_id: str, office_id: str) -> None: + """Deletes requested clob + + Parameters + ---------- + clob_id: string + Specifies the id of the clob to be deleted + office_id: string + Specifies the office of the clob. 
+ + Returns + ------- + None + """ + + endpoint = f"clobs/{clob_id}" + params = {"office": office_id} + + return api.delete(endpoint, params=params, api_version=1) + + +def update_clob(data: JSON, clob_id: str, ignore_nulls: Optional[bool] = True) -> None: + """Updates clob + + Parameters + ---------- + Data: JSON dictionary + JSON containing information of Clob to be updated + { + "office-id": "string", + "id": "string", + "description": "string", + "value": "string" + } + clob_id: string + Specifies the id of the clob to be deleted + ignore_nulls: Boolean + If true, null and empty fields in the provided clob will be ignored and the existing value of those fields left in place. Default: true + + Returns + ------- + None + """ + + if not isinstance(data, dict): + raise ValueError("Cannot store a Clob without a JSON data dictionary") + + endpoint = f"clobs/{clob_id}" + params = {"ignore-nulls": ignore_nulls} + + return api.patch(endpoint, data, params, api_version=1) + + +def store_clobs(data: JSON, fail_if_exists: Optional[bool] = True) -> None: + """Create New Clob + + Parameters + ---------- + Data: JSON dictionary + JSON containing information of Clob to be updated + { + "office-id": "string", + "id": "string", + "description": "string", + "value": "string" + } + fail_if_exists: Boolean + Create will fail if provided ID already exists. 
Default: true + + Returns + ------- + None + """ + + if not isinstance(data, dict): + raise ValueError("Cannot store a Clob without a JSON data dictionary") + + endpoint = "clobs" + params = {"fail-if-exists": fail_if_exists} + + return api.post(endpoint, data, params, api_version=1) diff --git a/cwms/datafile_imports/shef_critfile_import.py b/cwms/datafile_imports/shef_critfile_import.py index b0b5593c..11dc9669 100644 --- a/cwms/datafile_imports/shef_critfile_import.py +++ b/cwms/datafile_imports/shef_critfile_import.py @@ -3,10 +3,7 @@ import pandas as pd -from cwms.timeseries.timeseries import ( - timeseries_group_df_to_json, - update_timeseries_groups, -) +import cwms def import_critfile_to_ts_group( @@ -15,6 +12,7 @@ def import_critfile_to_ts_group( group_id: str = "SHEF Data Acquisition", category_id: str = "Data Acquisition", group_office_id: str = "CWMS", + category_office_id: str = "CWMS", replace_assigned_ts: bool = False, ) -> None: """ @@ -116,9 +114,15 @@ def append_df( df = append_df(df, office_id, data["Timeseries ID"], data["Alias"]) # Generate JSON dictionary - json_dict = timeseries_group_df_to_json(df, group_id, group_office_id, category_id) + json_dict = cwms.timeseries_group_df_to_json( + data=df, + group_id=group_id, + group_office_id=group_office_id, + category_office_id=category_office_id, + category_id=category_id, + ) - update_timeseries_groups( + cwms.update_timeseries_groups( group_id=group_id, office_id=office_id, replace_assigned_ts=replace_assigned_ts, diff --git a/cwms/locations/location_groups.py b/cwms/locations/location_groups.py new file mode 100644 index 00000000..c2d965ee --- /dev/null +++ b/cwms/locations/location_groups.py @@ -0,0 +1,166 @@ +from typing import Optional + +import pandas as pd +from pandas import DataFrame + +import cwms.api as api +from cwms.cwms_types import JSON, Data + + +def get_location_group( + loc_group_id: str, + category_id: str, + office_id: str, + group_office_id: str, + category_office_id: str, +) 
-> Data: + """Retrieves locations assigned to the requested location group + + Parameters + ---------- + loc_group_id: string + Location group whose data is to be included in the response. + category_id: string + The category id that contains the Location group. + office_id: string + The owning office of the Locations assigned to the group whose data is to be included in the response. + group_office_id: string + Specifies the owning office of the Location group. + category_office_id: string + Specifies the owning office of the Location group category. + + Returns + ------- + cwms data type. data.json will return the JSON output and data.df will return a dataframe + """ + + endpoint = f"location/group/{loc_group_id}" + params = { + "office": office_id, + "category-id": category_id, + "category-office-id": category_office_id, + "group-office-id": group_office_id, + } + + response = api.get(endpoint, params, api_version=1) + return Data(response, selector="assigned-locations") + + +def get_location_groups( + office_id: Optional[str] = None, + include_assigned: Optional[bool] = True, + location_category_like: Optional[str] = None, + location_office_id: Optional[str] = None, + category_office_id: Optional[str] = None, +) -> Data: + """ + Retrieves a list of location groups. + + Parameters + ---------- + office_id: string + Specifies the owning office of the location group whose data is to be included in the response. + include_assigned: Boolean + Include the assigned locations in the returned location groups. (default: true) + location_category_like: string + Posix regular expression matching against the location category id + location_office_id: String + Specifies the owning office of the location assigned to the location group whose data is to be included in the response. + category_office_id: string + Specifies the owning office of the category the location group belongs to whose data is to be included in the response. + Returns + ------- + cwms data type. 
data.json will return the JSON output and data.df will return a dataframe + """ + + endpoint = "location/group" + params = { + "office": office_id, + "include-assigned": include_assigned, + "location-category-like": location_category_like, + "location-office-id": location_office_id, + "category-office-id": category_office_id, + } + response = api.get(endpoint=endpoint, params=params, api_version=1) + return Data(response) + + +def store_location_groups(data: JSON) -> None: + """ + Create new Location Group + Parameters + ---------- + data: JSON dictionary + location group data to be stored. + + Returns + ------- + None + """ + + if data is None: + raise ValueError("Cannot store a standard text without timeseries group JSON") + + endpoint = "location/group" + + return api.post(endpoint=endpoint, data=data, api_version=1) + + +def update_location_group( + data: JSON, + group_id: str, + office_id: str, + replace_assigned_locs: Optional[bool] = False, +) -> None: + """ + Updates the location groups with the provided group ID and office ID. + + Parameters + ---------- + group_id : str + The group if of the location to be updated + office_id : str + The ID of the office associated with the specified location group. + replace_assigned_ts : bool, optional + Specifies whether to unassign all existing locations before assigning new locations specified in the content body. Default is False. + data: JSON dictionary + Location Group data to be stored. 
+ + Returns + ------- + None + """ + + endpoint = f"location/group/{group_id}" + params = { + "replace-assigned-locs": replace_assigned_locs, + "office": office_id, + } + + api.patch(endpoint=endpoint, data=data, params=params, api_version=1) + + +def delete_location_group(group_id: str, category_id: str, office_id: str) -> None: + """Deletes requested time series group + + Parameters + ---------- + group_id: string + The location group to be deleted + category_id: string + Specifies the location category of the location group to be deleted + office_id: string + Specifies the owning office of the location group to be deleted + + Returns + ------- + None + """ + + endpoint = f"location/group/{group_id}" + params = { + "office": office_id, + "category-id": category_id, + } + + return api.delete(endpoint, params=params, api_version=1) diff --git a/cwms/locations/physical_locations.py b/cwms/locations/physical_locations.py index 3821e5fe..0b449983 100644 --- a/cwms/locations/physical_locations.py +++ b/cwms/locations/physical_locations.py @@ -7,14 +7,6 @@ from cwms.cwms_types import JSON, Data -def get_location_group(loc_group_id: str, category_id: str, office_id: str) -> Data: - endpoint = f"location/group/{loc_group_id}" - params = {"office": office_id, "category-id": category_id} - - response = api.get(endpoint, params, api_version=1) - return Data(response, selector="assigned-locations") - - def get_location(location_id: str, office_id: str, unit: str = "EN") -> Data: """ Get location data for a single location diff --git a/cwms/ratings/ratings.py b/cwms/ratings/ratings.py index 9aaa02a7..f5b36650 100644 --- a/cwms/ratings/ratings.py +++ b/cwms/ratings/ratings.py @@ -325,7 +325,7 @@ def update_ratings( if not isinstance(data, dict) and xml_heading not in data: raise ValueError( - "Cannot store a timeseries without a JSON data dictionaryor in XML" + "Cannot store a rating without a JSON data dictionary or in XML" ) if xml_heading in data: diff --git 
a/cwms/timeseries/timeseries.py b/cwms/timeseries/timeseries.py index e82a21f9..4eef62a6 100644 --- a/cwms/timeseries/timeseries.py +++ b/cwms/timeseries/timeseries.py @@ -9,145 +9,6 @@ from cwms.cwms_types import JSON, Data -def update_timeseries_groups( - data: JSON, - group_id: str, - office_id: str, - replace_assigned_ts: Optional[bool] = False, -) -> None: - """ - Updates the timeseries groups with the provided group ID and office ID. - - Parameters - ---------- - group_id : str - The new specified timeseries ID that will replace the old ID. - office_id : str - The ID of the office associated with the specified timeseries. - replace_assigned_ts : bool, optional - Specifies whether to unassign all existing time series before assigning new time series specified in the content body. Default is False. - data: JSON dictionary - Time Series data to be stored. - ```````````````````````````````````````` - Returns - ------- - None - """ - if not group_id: - raise ValueError("Cannot update a specified level without an id") - if not office_id: - raise ValueError("Cannot update a specified level without an office id") - - endpoint = f"timeseries/group/{group_id}" - params = { - "replace-assigned-ts": replace_assigned_ts, - "office": office_id, - } - - api.patch(endpoint=endpoint, data=data, params=params, api_version=1) - - -def timeseries_group_df_to_json( - data: pd.DataFrame, - group_id: str, - office_id: str, - category_id: str, -) -> JSON: - """ - Converts a dataframe to a json dictionary in the correct format. - - Parameters - ---------- - data: pd.DataFrame - Dataframe containing timeseries information. - group_id: str - The group ID for the timeseries. - office_id: str - The ID of the office associated with the specified timeseries. - category_id: str - The ID of the category associated with the group - - Returns - ------- - JSON - JSON dictionary of the timeseries data. 
- """ - df = data.copy() - required_columns = ["office-id", "timeseries-id"] - optional_columns = ["alias-id", "attribute", "ts-code"] - for column in required_columns: - if column not in df.columns: - raise TypeError( - f"{column} is a required column in data when posting as a dataframe" - ) - - if df[required_columns].isnull().any().any(): - raise ValueError( - f"Null/NaN values found in required columns: {required_columns}. " - ) - - # Fill optional columns with default values if missing - if "alias-id" not in df.columns: - df["alias-id"] = None - if "attribute" not in df.columns: - df["attribute"] = 0 - - # Replace NaN with None for optional columns - for column in optional_columns: - if column in df.columns: - data[column] = df[column].where(pd.notnull(df[column]), None) - - # Build the list of time-series entries - assigned_time_series = df.apply( - lambda entry: { - "office-id": entry["office-id"], - "timeseries-id": entry["timeseries-id"], - "alias-id": entry["alias-id"], - "attribute": entry["attribute"], - **( - {"ts-code": entry["ts-code"]} - if "ts-code" in entry and pd.notna(entry["ts-code"]) - else {} - ), - }, - axis=1, - ).tolist() - - # Construct the final JSON dictionary - json_dict = { - "office-id": office_id, - "id": group_id, - "time-series-category": {"office-id": office_id, "id": category_id}, - "assigned-time-series": assigned_time_series, - } - - return json_dict - - -def get_timeseries_group(group_id: str, category_id: str, office_id: str) -> Data: - """Retreives time series stored in the requested time series group - - Parameters - ---------- - group_id: string - Timeseries group whose data is to be included in the response. - category_id: string - The category id that contains the timeseries group. - office_id: string - The owning office of the timeseries group. - - Returns - ------- - cwms data type. 
data.json will return the JSON output and data.df will return a dataframe - """ - - endpoint = f"timeseries/group/{group_id}" - params = {"office": office_id, "category-id": category_id} - - response = api.get(endpoint=endpoint, params=params, api_version=1) - return Data(response, selector="assigned-time-series") - - def get_multi_timeseries_df( ts_ids: list[str], office_id: str, diff --git a/cwms/timeseries/timeseries_bin.py b/cwms/timeseries/timeseries_bin.py index 10f2c4d9..96f77b28 100644 --- a/cwms/timeseries/timeseries_bin.py +++ b/cwms/timeseries/timeseries_bin.py @@ -88,18 +88,6 @@ def get_binary_timeseries( return Data(response) -def get_large_blob(url: str) -> bytes: - """ - Retrieves large blob data greater than 64kb from CWMS data api - :param url: str - Url used in query by CDA - :return: bytes - Large binary data - """ - response = requests.get(url) - return response.content - - def store_binary_timeseries(data: JSON, replace_all: bool = False) -> None: """ This method is used to store a binary time series through CWMS Data API. diff --git a/cwms/timeseries/timeseries_group.py b/cwms/timeseries/timeseries_group.py new file mode 100644 index 00000000..a6ba3ed9 --- /dev/null +++ b/cwms/timeseries/timeseries_group.py @@ -0,0 +1,253 @@ +import threading +from datetime import datetime +from typing import Any, Dict, Optional + +import pandas as pd +from pandas import DataFrame + +import cwms.api as api +from cwms.cwms_types import JSON, Data + + +def get_timeseries_group( + group_id: str, + category_id: str, + office_id: str, + group_office_id: str, + category_office_id: str, +) -> Data: + """Retreives time series stored in the requested time series group + + Parameters + ---------- + group_id: string + Timeseries group whose data is to be included in the response. + category_id: string + The category id that contains the timeseries group. 
+ office_id: string + The owning office of the timeseries assigned to the group whose data is to be included in the response. + group_office_id: string + Specifies the owning office of the timeseries group. + category_office_id: string + Specifies the owning office of the timeseries group category. + + Returns + ------- + cwms data type. data.json will return the JSON output and data.df will return a dataframe + """ + + endpoint = f"timeseries/group/{group_id}" + params = { + "office": office_id, + "category-id": category_id, + "category-office-id": category_office_id, + "group-office-id": group_office_id, + } + + response = api.get(endpoint=endpoint, params=params, api_version=1) + return Data(response, selector="assigned-time-series") + + +def get_timeseries_groups( + office_id: Optional[str] = None, + include_assigned: Optional[bool] = True, + timeseries_category_like: Optional[str] = None, + timeseries_group_like: Optional[str] = None, + category_office_id: Optional[str] = None, +) -> Data: + """ + Retreives a list of time series groups. + + Parameters + ---------- + category_id: string + The category id that contains the timeseries group. + include_assigned: Boolean + Include the assigned timeseries in the returned timeseries groups. (default: true) + timeseries_category_like: string + Posix regular expression matching against the timeseries category id + timeseries_group_like: String + Posix regular expression matching against the timeseries group id + category_office_id: string + Specifies the owning office of the timeseries group category + Returns + ------- + cwms data type. 
data.json will return the JSON output and data.df will return a dataframe + """ + + endpoint = "timeseries/group" + params = { + "office": office_id, + "include-assigned": include_assigned, + "timeseries-category-like": timeseries_category_like, + "timeseries_group_like": timeseries_group_like, + "category-office-id": category_office_id, + } + response = api.get(endpoint=endpoint, params=params, api_version=1) + return Data(response) + + +def timeseries_group_df_to_json( + data: pd.DataFrame, + group_id: str, + group_office_id: str, + category_office_id: str, + category_id: str, +) -> JSON: + """ + Converts a dataframe to a json dictionary in the correct format. + + Parameters + ---------- + data: pd.DataFrame + Dataframe containing timeseries information. + group_id: str + The group ID for the timeseries. + office_id: str + The ID of the office associated with the specified timeseries. + category_id: str + The ID of the category associated with the group + + Returns + ------- + JSON + JSON dictionary of the timeseries data. + """ + df = data.copy() + required_columns = ["office-id", "timeseries-id"] + optional_columns = ["alias-id", "attribute", "ts-code"] + for column in required_columns: + if column not in df.columns: + raise TypeError( + f"{column} is a required column in data when posting as a dataframe" + ) + + if df[required_columns].isnull().any().any(): + raise ValueError( + f"Null/NaN values found in required columns: {required_columns}. 
" + ) + + # Fill optional columns with default values if missing + if "alias-id" not in df.columns: + df["alias-id"] = None + if "attribute" not in df.columns: + df["attribute"] = 0 + + # Replace NaN with None for optional columns + for column in optional_columns: + if column in df.columns: + data[column] = df[column].where(pd.notnull(df[column]), None) + + # Build the list of time-series entries + assigned_time_series = df.apply( + lambda entry: { + "office-id": entry["office-id"], + "timeseries-id": entry["timeseries-id"], + "alias-id": entry["alias-id"], + "attribute": entry["attribute"], + **( + {"ts-code": entry["ts-code"]} + if "ts-code" in entry and pd.notna(entry["ts-code"]) + else {} + ), + }, + axis=1, + ).tolist() + + # Construct the final JSON dictionary + json_dict = { + "office-id": group_office_id, + "id": group_id, + "time-series-category": {"office-id": category_office_id, "id": category_id}, + "assigned-time-series": assigned_time_series, + } + + return json_dict + + +def store_timeseries_groups(data: JSON, fail_if_exists: Optional[bool] = True) -> None: + """ + Create new TimeSeriesGroup + Parameters + ---------- + data: JSON dictionary + Time Series data to be stored. + fail_if_exists: Boolean Defualt = True + Create will fail if provided ID already exists. + + Returns + ------- + None + """ + + if data is None: + raise ValueError("Cannot store a standard text without timeseries group JSON") + + endpoint = "timeseries/group" + params = {"fail-if-exists": fail_if_exists} + + return api.post(endpoint, data, params, api_version=1) + + +def update_timeseries_groups( + data: JSON, + group_id: str, + office_id: str, + replace_assigned_ts: Optional[bool] = False, +) -> None: + """ + Updates the timeseries groups with the provided group ID and office ID. + + Parameters + ---------- + group_id : str + The group id of the timeseries group to be updates + office_id : str + The ID of the office associated with the timeseries group. 
+ replace_assigned_ts : bool, optional + Specifies whether to unassign all existing timeseries before assigning new timeseries specified in the content body. Default is False. + data: JSON dictionary + Timeseries data to be stored. + + Returns + ------- + None + """ + if not group_id: + raise ValueError("Cannot update a timeseries groups without an id") + if not office_id: + raise ValueError("Cannot update a timeseries groups without an office id") + + endpoint = f"timeseries/group/{group_id}" + params = { + "replace-assigned-ts": replace_assigned_ts, + "office": office_id, + } + + api.patch(endpoint=endpoint, data=data, params=params, api_version=1) + + +def delete_timeseries_group(group_id: str, category_id: str, office_id: str) -> None: + """Deletes requested time series group + + Parameters + ---------- + group_id: string + The time series group to be deleted + category_id: string + Specifies the time series category of the time series group to be deleted + office_id: string + Specifies the owning office of the time series group to be deleted + + Returns + ------- + None + """ + + endpoint = f"timeseries/group/{group_id}" + params = { + "office": office_id, + "category-id": category_id, + } + + return api.delete(endpoint, params=params, api_version=1) diff --git a/cwms/timeseries/timeseries_txt.py b/cwms/timeseries/timeseries_txt.py index 211d4968..b9c5b739 100644 --- a/cwms/timeseries/timeseries_txt.py +++ b/cwms/timeseries/timeseries_txt.py @@ -78,20 +78,6 @@ def get_text_timeseries( return Data(response) -def get_large_clob(url: str, encoding: str = "utf-8") -> str: - """ - Retrieves large clob data greater than 64kb from CWMS data api - :param url: str - Url used in query by CDA - :param encoding: str, optional - Encoding used to decode text data. 
Default utf-8 - :return: str - Large text data - """ - response = requests.get(url) - return response.content.decode(encoding) - - def store_text_timeseries(data: JSON, replace_all: bool = False) -> None: """ This method is used to store a text time series through CWMS Data API. diff --git a/pyproject.toml b/pyproject.toml index 3a05ca7a..45da3fcf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] name = "cwms-python" -version = "0.5.6" +version = "0.5.7" packages = [ { include = "cwms" }, diff --git a/tests/locations/physical_locations_test.py b/tests/locations/physical_locations_test.py index 2ef5a9ea..a085164e 100644 --- a/tests/locations/physical_locations_test.py +++ b/tests/locations/physical_locations_test.py @@ -2,6 +2,7 @@ import pytest import cwms.api +import cwms.locations.location_groups as location_groups import cwms.locations.physical_locations as locations _MOCK_ROOT = "https://mockwebserver.cwms.gov" @@ -36,18 +37,28 @@ def init_session(): def test_get_location_group(requests_mock): - group_id = "test-location-group" + loc_group_id = "test-location-group" category_id = "test-location-category" office_id = "test-office" + category_office_id = "test-office" + group_office_id = "test-office" requests_mock.get( - f"{_MOCK_ROOT}/location/group/{group_id}?" + f"{_MOCK_ROOT}/location/group/{loc_group_id}?" 
f"office={office_id}&" - f"category-id={category_id}", + f"category-id={category_id}&" + f"category-office-id={category_office_id}&" + f"group-office-id={group_office_id}", json=EXAMPLE_LOCATION_GROUP, ) - data = locations.get_location_group(group_id, category_id, office_id) + data = location_groups.get_location_group( + loc_group_id=loc_group_id, + category_id=category_id, + office_id=office_id, + category_office_id=category_office_id, + group_office_id=group_office_id, + ) assert data.json == EXAMPLE_LOCATION_GROUP assert type(data.df) is pd.DataFrame diff --git a/tests/resources/time_series_group.json b/tests/resources/time_series_group.json index 7910317d..711e926f 100644 --- a/tests/resources/time_series_group.json +++ b/tests/resources/time_series_group.json @@ -42,48 +42,6 @@ "ts-code": 7009109, "alias-id": "59870", "attribute": 0 - }, - { - "office-id": "SWG", - "timeseries-id": "Addicks-Gate 3.Flow.Inst.15Minutes.0.USGS-Rev", - "ts-code": 32384149, - "alias-id": "300027", - "attribute": 0 - }, - { - "office-id": "SWG", - "timeseries-id": "Addicks-Gate 1.Flow.Inst.15Minutes.0.USGS-Rev", - "ts-code": 32381149, - "alias-id": "300028", - "attribute": 0 - }, - { - "office-id": "SWG", - "timeseries-id": "Addicks-Gate 2.Flow.Inst.15Minutes.0.USGS-Rev", - "ts-code": 32382149, - "alias-id": "300029", - "attribute": 0 - }, - { - "office-id": "SWG", - "timeseries-id": "Addicks-Gated_Total.Flow.Inst.15Minutes.0.USGS-Rev", - "ts-code": 32408149, - "alias-id": "300048", - "attribute": 0 - }, - { - "office-id": "SWG", - "timeseries-id": "Barker-Gate 1.Flow.Inst.15Minutes.0.USGS-Rev", - "ts-code": 32379149, - "alias-id": "299976", - "attribute": 0 - }, - { - "office-id": "SWG", - "timeseries-id": "Barker-Gated_Total.Flow.Inst.15Minutes.0.USGS-Rev", - "ts-code": 32407149, - "alias-id": "299978", - "attribute": 0 } ] } \ No newline at end of file diff --git a/tests/timeseries/timeseries_test.py b/tests/timeseries/timeseries_test.py index 2ca21f76..6672c821 100644 --- 
a/tests/timeseries/timeseries_test.py +++ b/tests/timeseries/timeseries_test.py @@ -11,6 +11,7 @@ import cwms.api import cwms.timeseries.timeseries as timeseries +import cwms.timeseries.timeseries_group as timeseries_group from tests._test_utils import read_resource_file _MOCK_ROOT = "https://mockwebserver.cwms.gov" @@ -40,7 +41,7 @@ def test_update_timeseries_groups(requests_mock): status_code=200, ) - timeseries.update_timeseries_groups( + timeseries_group.update_timeseries_groups( data=data, group_id=group_id, office_id=office_id, @@ -94,8 +95,12 @@ def test_timeseries_group_df_to_json_valid_data(): ], } - result = timeseries.timeseries_group_df_to_json( - data, "group123", "office123", "cat123" + result = timeseries_group.timeseries_group_df_to_json( + data=data, + group_id="group123", + group_office_id="office123", + category_office_id="office123", + category_id="cat123", ) assert result == expected_json @@ -247,25 +252,42 @@ def test_get_timeseries_paging(requests_mock): def test_get_timeseries_group_default(requests_mock): + group_id = "USGS TS Data Acquisition" + category_id = "Data Acquisition" + office_id = "LRL" + category_office_id = "CMWS" + group_office_id = "CWMS" + + requests_mock.get( + f"{_MOCK_ROOT}/timeseries/group/{group_id}?" 
+ f"office={office_id}&" + f"category-id={category_id}&" + f"category-office-id={category_office_id}&" + f"group-office-id={group_office_id}", + json=_TS_GROUP, + ) + + """ requests_mock.get( f"{_MOCK_ROOT}" "/timeseries/group/USGS%20TS%20Data%20Acquisition?office=CWMS&" "category-id=Data%20Acquisition", json=_TS_GROUP, ) + """ - group_id = "USGS TS Data Acquisition" - category_id = "Data Acquisition" - office_id = "CWMS" - - data = timeseries.get_timeseries_group( - group_id=group_id, category_id=category_id, office_id=office_id + data = timeseries_group.get_timeseries_group( + group_id=group_id, + category_id=category_id, + office_id=office_id, + category_office_id=category_office_id, + group_office_id=group_office_id, ) assert data.json == _TS_GROUP assert type(data.df) is pd.DataFrame assert "timeseries-id" in data.df.columns - assert data.df.shape == (11, 5) + assert data.df.shape == (5, 5) values = data.df.to_numpy().tolist() assert values[0] == [ "LRL",