From 4810bbe1e29c59d142b65e3ff1489bae42457cf2 Mon Sep 17 00:00:00 2001
From: msweier
Date: Wed, 16 Jul 2025 14:17:08 -0500
Subject: [PATCH 01/14] add location_group_df_to_json

---
 cwms/locations/location_groups.py | 76 +++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/cwms/locations/location_groups.py b/cwms/locations/location_groups.py
index cbbf650f..bd095680 100644
--- a/cwms/locations/location_groups.py
+++ b/cwms/locations/location_groups.py
@@ -84,6 +84,82 @@ def get_location_groups(
     response = api.get(endpoint=endpoint, params=params, api_version=1)
     return Data(response)
+def location_group_df_to_json(
+    data: pd.DataFrame,
+    group_id: str,
+    group_office_id: str,
+    category_office_id: str,
+    category_id: str,
+) -> JSON:
+    """
+    Converts a dataframe to a JSON dictionary in the correct format.
+
+    Parameters
+    ----------
+    data: pd.DataFrame
+        Dataframe containing the locations to assign to the group.
+    group_id: str
+        The ID of the location group.
+    group_office_id, category_office_id: str
+        The IDs of the offices that own the location group and location category.
+    category_id: str
+        The ID of the location category associated with the group.
+
+    Returns
+    -------
+    JSON
+        JSON dictionary of the location group data.
+    """
+    df = data.copy()
+    required_columns = ["office-id", "location-id"]
+    optional_columns = ["alias-id", "attribute", "ref-location-id"]
+    for column in required_columns:
+        if column not in df.columns:
+            raise TypeError(
+                f"{column} is a required column in data when posting as a dataframe"
+            )
+
+    if df[required_columns].isnull().any().any():
+        raise ValueError(
+            f"Null/NaN values found in required columns: {required_columns}. "
+        )
+
+    # Fill optional columns with default values if missing
+    if "alias-id" not in df.columns:
+        df["alias-id"] = None
+    if "attribute" not in df.columns:
+        df["attribute"] = 0
+
+    # Replace NaN with None for optional columns
+    for column in optional_columns:
+        if column in df.columns:
+            df[column] = df[column].where(pd.notnull(df[column]), None)
+
+    # Build the list of assigned-location entries
+    assigned_locs = df.apply(
+        lambda entry: {
+            "office-id": entry["office-id"],
+            "location-id": entry["location-id"],
+            "alias-id": entry["alias-id"],
+            "attribute": entry["attribute"],
+            **(
+                {"ref-location-id": entry["ref-location-id"]}
+                if "ref-location-id" in entry and pd.notna(entry["ref-location-id"])
+                else {}
+            ),
+        },
+        axis=1,
+    ).tolist()
+
+    # Construct the final JSON dictionary
+    json_dict = {
+        "office-id": group_office_id,
+        "id": group_id,
+        "location-category": {"office-id": category_office_id, "id": category_id},
+        "assigned-locations": assigned_locs,
+    }
+
+    return json_dict
 def store_location_groups(data: JSON) -> None:
     """
     Create new Location Group

From 4babc96a7d3b4504b3bd51bc0f0eba1b6f5130b3 Mon Sep 17 00:00:00 2001
From: msweier
Date: Wed, 16 Jul 2025 14:52:22 -0500
Subject: [PATCH 02/14] add cascade delete

---
 cwms/locations/location_groups.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/cwms/locations/location_groups.py b/cwms/locations/location_groups.py
index bd095680..14ba3940 100644
--- a/cwms/locations/location_groups.py
+++ b/cwms/locations/location_groups.py
@@ -84,6 +84,7 @@ def get_location_groups(
     response = api.get(endpoint=endpoint, params=params, api_version=1)
     return Data(response)
+
 def location_group_df_to_json(
     data: pd.DataFrame,
     group_id: str,
@@ -161,6 +162,7 @@ def location_group_df_to_json(
     return json_dict
+
 def store_location_groups(data: JSON) -> None:
     """
     Create new Location Group
@@ -216,7 +218,12 @@ def 
update_location_group( api.patch(endpoint=endpoint, data=data, params=params, api_version=1) -def delete_location_group(group_id: str, category_id: str, office_id: str) -> None: +def delete_location_group( + group_id: str, + category_id: str, + office_id: str, + cascade_delete: Optional[bool] = False, +) -> None: """Deletes requested time series group Parameters @@ -237,6 +244,7 @@ def delete_location_group(group_id: str, category_id: str, office_id: str) -> No params = { "office": office_id, "category-id": category_id, + "cascade-delete": cascade_delete, } return api.delete(endpoint, params=params, api_version=1) From f38ca6bb9f29ca308187691bc3c478e965a50117 Mon Sep 17 00:00:00 2001 From: msweier Date: Wed, 16 Jul 2025 14:52:40 -0500 Subject: [PATCH 03/14] add cwms group example notebook --- examples/06_cwms_groups_examples.ipynb | 837 +++++++++++++++++++++++++ 1 file changed, 837 insertions(+) create mode 100644 examples/06_cwms_groups_examples.ipynb diff --git a/examples/06_cwms_groups_examples.ipynb b/examples/06_cwms_groups_examples.ipynb new file mode 100644 index 00000000..b8cff940 --- /dev/null +++ b/examples/06_cwms_groups_examples.ipynb @@ -0,0 +1,837 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#libraries\n", + "import os\n", + "import pandas as pd\n", + "# import sys\n", + "# sys.path.insert(0,r'C:\\\\code\\\\cwms-python')\n", + "import cwms\n", + "from dotenv import load_dotenv\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# grab API variables from .env file\n", + "load_dotenv()\n", + "APIROOT = os.getenv(\"API_ROOT\")\n", + "OFFICE = os.getenv(\"OFFICE\")\n", + "APIKEY = os.getenv('API_KEY')\n", + "# connect to T7\n", + "apiKey = \"apikey \" + APIKEY\n", + "api = cwms.api.init_session(api_root=APIROOT, api_key=apiKey)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create timeseries group\n", + "You can skip this step if your group is already created. \n", + "You will likely only have write access to your office so your group must use your office ID. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "category_id = \"Test Category Name\"\n", + "cat_desc = \"test cat description\"\n", + "group_id = \"Test Group Name\"\n", + "group_desc = \"test group description\"\n", + "# get example json from swagger and edit\n", + "\n", + "\n", + "tsGroup_data = {\n", + " \"office-id\": OFFICE,\n", + " \"id\": group_id,\n", + " \"time-series-category\": {\n", + " \"office-id\": OFFICE,\n", + " \"id\": category_id,\n", + " \"description\": cat_desc\n", + " },\n", + " \"description\": group_desc,\n", + "# \"shared-alias-id\": \"string\",\n", + "# \"shared-ref-ts-id\": \"string\",\n", + "# \"assigned-time-series\": [\n", + "# {\n", + "# \"officeId\": \"string\",\n", + "# \"timeseriesId\": \"string\",\n", + "# \"aliasId\": \"string\",\n", + "# \"refTsId\": \"string\",\n", + "# \"attribute\": 0\n", + "# }\n", + "# ]\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "cwms.store_timeseries_groups(data = tsGroup_data, fail_if_exists = False)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'office-id': 'MVP',\n", + " 'id': 'Test Group Name',\n", + " 'time-series-category': {'office-id': 'MVP', 'id': 'Test Category Name'},\n", + " 'description': 'test group description',\n", + " 'assigned-time-series': []}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cwms.get_timeseries_group(group_id=group_id, \n", + " category_id=category_id, \n", + " office_id=OFFICE, \n", + " category_office_id=OFFICE, \n", + " group_office_id=OFFICE).json" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Add timeseries to Group" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
timeseries-idoffice-idalias
0Baldhill_Dam.Flow-In.Inst.~15Minutes.0.bestMVP
1Baldhill_Dam.Flow-Out.Inst.~15Minutes.0.bestMVP
2Baldhill_Dam.Stage.Inst.~15Minutes.0.bestMVP
3Baldhill_Dam-Tailwater.Stage.Inst.~15Minutes.0...MVP
4ChippewaDiv_Dam.Flow-Out.Inst.~15Minutes.0.bestMVP
5ChippewaDiv_Dam.Stage.Inst.~15Minutes.0.bestMVP
6ChippewaDiv_Dam-Tailwater.Stage.Inst.~15Minute...MVP
7EauGalle_Dam.Flow-In.Inst.~15Minutes.0.bestMVP
8EauGalle_Dam.Flow-Out.Inst.~15Minutes.0.bestMVP
9EauGalle_Dam.Stage.Inst.~15Minutes.0.bestMVP
10EauGalle_Dam-Tailwater.Stage.Inst.~15Minutes.0...MVP
\n", + "
" + ], + "text/plain": [ + " timeseries-id office-id alias\n", + "0 Baldhill_Dam.Flow-In.Inst.~15Minutes.0.best MVP \n", + "1 Baldhill_Dam.Flow-Out.Inst.~15Minutes.0.best MVP \n", + "2 Baldhill_Dam.Stage.Inst.~15Minutes.0.best MVP \n", + "3 Baldhill_Dam-Tailwater.Stage.Inst.~15Minutes.0... MVP \n", + "4 ChippewaDiv_Dam.Flow-Out.Inst.~15Minutes.0.best MVP \n", + "5 ChippewaDiv_Dam.Stage.Inst.~15Minutes.0.best MVP \n", + "6 ChippewaDiv_Dam-Tailwater.Stage.Inst.~15Minute... MVP \n", + "7 EauGalle_Dam.Flow-In.Inst.~15Minutes.0.best MVP \n", + "8 EauGalle_Dam.Flow-Out.Inst.~15Minutes.0.best MVP \n", + "9 EauGalle_Dam.Stage.Inst.~15Minutes.0.best MVP \n", + "10 EauGalle_Dam-Tailwater.Stage.Inst.~15Minutes.0... MVP " + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ts_to_add = '''Baldhill_Dam.Flow-In.Inst.~15Minutes.0.best\n", + "Baldhill_Dam.Flow-Out.Inst.~15Minutes.0.best\n", + "Baldhill_Dam.Stage.Inst.~15Minutes.0.best\n", + "Baldhill_Dam-Tailwater.Stage.Inst.~15Minutes.0.best\n", + "ChippewaDiv_Dam.Flow-Out.Inst.~15Minutes.0.best\n", + "ChippewaDiv_Dam.Stage.Inst.~15Minutes.0.best\n", + "ChippewaDiv_Dam-Tailwater.Stage.Inst.~15Minutes.0.best\n", + "EauGalle_Dam.Flow-In.Inst.~15Minutes.0.best\n", + "EauGalle_Dam.Flow-Out.Inst.~15Minutes.0.best\n", + "EauGalle_Dam.Stage.Inst.~15Minutes.0.best\n", + "EauGalle_Dam-Tailwater.Stage.Inst.~15Minutes.0.best\n", + " '''\n", + "\n", + "data = []\n", + "# loop through the ts ids\n", + "for line in ts_to_add.strip().split('\\n'):\n", + " cleaned_line = line.strip() # Remove whitespace from the current line\n", + "\n", + " if not cleaned_line: # Skip empty lines that might result from extra newlines\n", + " continue\n", + "\n", + " ts_id = cleaned_line\n", + " # append data dict\n", + " data.append({\n", + " 'timeseries-id': ts_id,\n", + " 'office-id': OFFICE,\n", + " 'alias': ''\n", + " })\n", + "\n", + "# Create the pandas DataFrame\n", + "df = pd.DataFrame(data)\n", + "\n", + "df\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'office-id': 'MVP',\n", + " 'id': 'Test Group Name',\n", + " 'time-series-category': {'office-id': 'MVP', 'id': 'Test Category Name'},\n", + " 'assigned-time-series': [{'office-id': 'MVP',\n", + " 'timeseries-id': 'Baldhill_Dam.Flow-In.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'Baldhill_Dam.Flow-Out.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'Baldhill_Dam.Stage.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'Baldhill_Dam-Tailwater.Stage.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'ChippewaDiv_Dam.Flow-Out.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'ChippewaDiv_Dam.Stage.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'ChippewaDiv_Dam-Tailwater.Stage.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'EauGalle_Dam.Flow-In.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 
0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'EauGalle_Dam.Flow-Out.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'EauGalle_Dam.Stage.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'EauGalle_Dam-Tailwater.Stage.Inst.~15Minutes.0.best',\n", + " 'alias-id': None,\n", + " 'attribute': 0}]}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# create the json_dictionary\n", + "json_dict = cwms.timeseries_group_df_to_json(\n", + " data=df,\n", + " group_id=group_id,\n", + " group_office_id=OFFICE,\n", + " category_office_id=OFFICE,\n", + " category_id=category_id,\n", + " )\n", + "\n", + "json_dict" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# update the group\n", + "cwms.update_timeseries_groups(\n", + " group_id=group_id,\n", + " office_id=OFFICE,\n", + " replace_assigned_ts=False,\n", + " data=json_dict,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'office-id': 'MVP',\n", + " 'id': 'Test Group Name',\n", + " 'time-series-category': {'office-id': 'MVP', 'id': 'Test Category Name'},\n", + " 'description': 'test group description',\n", + " 'assigned-time-series': [{'office-id': 'MVP',\n", + " 'timeseries-id': 'Baldhill_Dam-Tailwater.Stage.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'Baldhill_Dam.Flow-In.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'Baldhill_Dam.Flow-Out.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'Baldhill_Dam.Stage.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'EauGalle_Dam-Tailwater.Stage.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'ChippewaDiv_Dam.Stage.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'EauGalle_Dam.Flow-Out.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'EauGalle_Dam.Stage.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'ChippewaDiv_Dam-Tailwater.Stage.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'ChippewaDiv_Dam.Flow-Out.Inst.~15Minutes.0.best',\n", + " 'attribute': 0},\n", + " {'office-id': 'MVP',\n", + " 'timeseries-id': 'EauGalle_Dam.Flow-In.Inst.~15Minutes.0.best',\n", + " 'attribute': 0}]}" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cwms.get_timeseries_group(group_id=group_id, \n", + " category_id=category_id, \n", + " office_id=OFFICE, \n", + " category_office_id=OFFICE, \n", + " group_office_id=OFFICE).json" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Delete timeseries group\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "# define function of steps needed to delete group\n", + "def delete_timeseries_group_members(group_id=str, \n", + " group_office_id=str, \n", + " 
category_office_id=str,\n", + " category_id=str ):\n", + "\n", + " # remove all ts ids from the group\n", + "\n", + "\n", + " # create empty df\n", + " df = pd.DataFrame(columns= ['timeseries-id',\n", + " 'office-id',\n", + " 'alias']) \n", + "\n", + " # create empty json dictionary\n", + "\n", + " \n", + " json_dict = cwms.timeseries_group_df_to_json(\n", + " data=df,\n", + " group_id=group_id,\n", + " group_office_id=group_office_id,\n", + " category_office_id=category_office_id,\n", + " category_id=category_id,\n", + " )\n", + "\n", + " # update the group with no data and set replace_assigned_ts flag as True\n", + " cwms.update_timeseries_groups(\n", + " group_id=group_id,\n", + " office_id=OFFICE,\n", + " replace_assigned_ts=True,\n", + " data=json_dict,\n", + " )\n", + " \n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "# call function to delete all ts ids from group\n", + "delete_timeseries_group_members(group_id=group_id, \n", + " group_office_id=OFFICE, \n", + " category_office_id=OFFICE,\n", + " category_id=category_id )" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "# delete empty timeseries group\n", + "\n", + "cwms.delete_timeseries_group(group_id=group_id,\n", + " category_id=category_id,\n", + " office_id=OFFICE)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create location group\n", + "You can skip this step if your group is already created. \n", + "You will likely only have write access to your office so your group must use your office ID. \n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "category_id = \"Test Category Name\"\n", + "cat_desc = \"test cat description\"\n", + "group_id = \"Test Group Name\"\n", + "group_desc = \"test group description\"\n", + "# get example json from swagger and edit\n", + "\n", + "\n", + "locGroup_data = {\n", + " \"office-id\": OFFICE,\n", + " \"id\": group_id,\n", + " \"location-category\": {\n", + " \"office-id\": OFFICE,\n", + " \"id\": category_id,\n", + " \"description\": cat_desc\n", + " },\n", + " \"description\": group_desc,\n", + "# \"shared-loc-alias-id\": \"string\",\n", + "# \"shared-ref-location-id\": \"string\",\n", + " # \"loc-group-attribute\": 0,\n", + " # \"assigned-locations\": [\n", + " # {\n", + " # # \"location-id\": \"string\",\n", + " # # \"office-id\": \"string\",\n", + " # # \"alias-id\": \"string\",\n", + " # # \"attribute\": 0,\n", + " # # \"ref-location-id\": \"string\"\n", + " # }\n", + " # ]\n", + "}\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "cwms.store_location_groups(data = locGroup_data)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'office-id': 'MVP',\n", + " 'id': 'Test Group Name',\n", + " 'location-category': {'office-id': 'MVP', 'id': 'Test Category Name'},\n", + " 'description': 'test group description',\n", + " 'assigned-locations': []}" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cwms.get_location_group(loc_group_id=group_id, \n", + " category_id=category_id, \n", + " office_id=OFFICE, \n", + " category_office_id=OFFICE, \n", + " group_office_id=OFFICE).json" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + 
"# Add Locations to Group" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
location-idoffice-idalias-idattributeref-location-id
0Baldhill_DamMVP
1ChippewaDiv_DamMVP
2EauGalle_DamMVP
\n", + "
" + ], + "text/plain": [ + " location-id office-id alias-id attribute ref-location-id\n", + "0 Baldhill_Dam MVP \n", + "1 ChippewaDiv_Dam MVP \n", + "2 EauGalle_Dam MVP " + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "locs_to_add = '''Baldhill_Dam\n", + "ChippewaDiv_Dam\n", + "EauGalle_Dam'''\n", + "\n", + "data = []\n", + "# loop through the loc ids\n", + "for line in locs_to_add.strip().split('\\n'):\n", + " cleaned_line = line.strip() # Remove whitespace from the current line\n", + "\n", + " if not cleaned_line: # Skip empty lines that might result from extra newlines\n", + " continue\n", + "\n", + " loc = cleaned_line\n", + " # append data dict\n", + " data.append({\n", + " \"location-id\": loc,\n", + " 'office-id': OFFICE,\n", + " \"alias-id\": '',\n", + " \"attribute\":'',\n", + " \"ref-location-id\":''\n", + " })\n", + "\n", + "# Create the pandas DataFrame\n", + "df = pd.DataFrame(data)\n", + "\n", + "df" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'office-id': 'MVP',\n", + " 'id': 'Test Group Name',\n", + " 'location-category': {'office-id': 'MVP', 'id': 'Test Category Name'},\n", + " 'assigned-locations': [{'office-id': 'MVP',\n", + " 'location-id': 'Baldhill_Dam',\n", + " 'alias-id': '',\n", + " 'attribute': '',\n", + " 'ref-location-id': ''},\n", + " {'office-id': 'MVP',\n", + " 'location-id': 'ChippewaDiv_Dam',\n", + " 'alias-id': '',\n", + " 'attribute': '',\n", + " 'ref-location-id': ''},\n", + " {'office-id': 'MVP',\n", + " 'location-id': 'EauGalle_Dam',\n", + " 'alias-id': '',\n", + " 'attribute': '',\n", + " 'ref-location-id': ''}]}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# create the json_dictionary\n", + "json_dict = cwms.location_group_df_to_json(\n", + " data=df,\n", + " group_id=group_id,\n", + " group_office_id=OFFICE,\n", + " category_office_id=OFFICE,\n", + " category_id=category_id,\n", + " )\n", + "\n", + "json_dict" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "# update the group\n", + "cwms.update_location_group(\n", + " group_id=group_id,\n", + " office_id=OFFICE,\n", + " replace_assigned_locs=True,\n", + " data=json_dict,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'office-id': 'MVP',\n", + " 'id': 'Test Group Name',\n", + " 'location-category': {'office-id': 'MVP', 'id': 'Test Category Name'},\n", + " 'description': 'test group description',\n", + " 'assigned-locations': [{'location-id': 'Baldhill_Dam', 'office-id': 'MVP'},\n", + " {'location-id': 'EauGalle_Dam', 'office-id': 'MVP'},\n", + " {'location-id': 'ChippewaDiv_Dam', 'office-id': 'MVP'}]}" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cwms.get_location_group(loc_group_id=group_id, \n", + " category_id=category_id, \n", + " office_id=OFFICE, \n", + " category_office_id=OFFICE, \n", + " group_office_id=OFFICE).json" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Delete location group\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "# cascade delete location group\n", + "\n", + "cwms.delete_location_group(group_id=group_id,\n", + " 
category_id=category_id,\n", + " office_id=OFFICE, \n", + " cascade_delete=True # this will delete all locations and group\n", + " )" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 440a473ed10c17052e9df18427c3f74d1643d988 Mon Sep 17 00:00:00 2001 From: msweier Date: Wed, 16 Jul 2025 14:53:32 -0500 Subject: [PATCH 04/14] bump version to 0.8.1 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e799e8a1..1b0ec00d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "cwms-python" repository = "https://github.com/HydrologicEngineeringCenter/cwms-python" -version = "0.8.0" +version = "0.8.1" packages = [ From af804cac19dbc0e1ab612ceae6964aca12815d24 Mon Sep 17 00:00:00 2001 From: msweier Date: Mon, 28 Jul 2025 13:23:57 -0500 Subject: [PATCH 05/14] add test --- tests/cda/locations/location_groups_test.py | 159 ++++++++++++++++++++ 1 file changed, 159 insertions(+) create mode 100644 tests/cda/locations/location_groups_test.py diff --git a/tests/cda/locations/location_groups_test.py b/tests/cda/locations/location_groups_test.py new file mode 100644 index 00000000..bfe8fb76 --- /dev/null +++ b/tests/cda/locations/location_groups_test.py @@ -0,0 +1,159 @@ +import pytest +import pandas as pd + +import cwms.locations.physical_locations as locations +import cwms.locations.location_groups as lg + +TEST_OFFICE = "SPK" +TEST_LOCATION_ID = "pytest_group-loc-123" +TEST_LATITUDE = 44.1 +TEST_LONGITUDE = -93.1 + + +BASE_LOCATION_DATA = { + "name": TEST_LOCATION_ID, + "office-id": TEST_OFFICE, + "latitude": TEST_LATITUDE, + "longitude": TEST_LONGITUDE, + "elevation": 250.0, + "horizontal-datum": "NAD83", + "vertical-datum": "NAVD88", + "location-type": "TESTING", + "public-name": "Test Location", + "long-name": "A pytest-generated location", + "timezone-name": "America/Los_Angeles", + "location-kind": "SITE", + "nation": "US", +} + + + + +TEST_CATEGORY_ID = "Test Category Name" +TEST_CAT_DESCRIPT = "test cat description" +TEST_GROUP_ID = "Test Group Name" +TEST_GROUP_DESCRIPT = "test group description" + + +LOC_GROUP_DATA = { + "office-id": TEST_OFFICE, + "id": TEST_GROUP_ID, + "location-category": { + "office-id": TEST_OFFICE, + "id": TEST_CATEGORY_ID, + "description": TEST_CAT_DESCRIPT, + }, + "description": TEST_GROUP_DESCRIPT, + +} + +LOC_GROUP_DATA_UPDATE = { + "office-id": TEST_OFFICE, + "id": TEST_GROUP_ID, + "location-category": { + "office-id": TEST_OFFICE, + "id": TEST_CATEGORY_ID, + "description": TEST_CAT_DESCRIPT, + }, + "description": TEST_GROUP_DESCRIPT, + "assigned-locations": [{"location-id": TEST_LOCATION_ID, + "office-id": TEST_OFFICE}] +} + + +@pytest.fixture(autouse=True) +def init_session(request): + print("Initializing CWMS API session for locations operations test...") + + +def test_store_location(): + locations.store_location(BASE_LOCATION_DATA) + df = locations.get_location(location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE).df + assert TEST_LOCATION_ID in df["name"].values + + +def test_store_location_group(): + + lg.store_location_groups(data=LOC_GROUP_DATA) + data = lg.get_location_group( + 
loc_group_id=TEST_GROUP_ID, + category_id=TEST_CATEGORY_ID, + office_id=TEST_OFFICE, + group_office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + ).json + assert TEST_LOCATION_ID in data["id"] + assert TEST_CATEGORY_ID in data["location-category"]["id"] + +def test_get_location_groups(): + data = lg.get_location_groups( + office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + location_office_id=TEST_OFFICE, + location_category_like=TEST_CATEGORY_ID + ).json + assert TEST_CATEGORY_ID in data["location-category"]["id"] + + data = lg.get_location_group( + loc_group_id=TEST_GROUP_ID, + category_id=TEST_CATEGORY_ID, + office_id=TEST_OFFICE, + group_office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + ).json + assert TEST_LOCATION_ID in data["id"] + assert TEST_CATEGORY_ID in data["location-category"]["id"] + + +def test_location_group_df_to_json(): + data = [] + data.append( + { + "location-id": TEST_LOCATION_ID, + "office-id": TEST_OFFICE, + "alias-id": "", + "attribute": "", + "ref-location-id": "", + } + ) + + # Create the pandas DataFrame + df = pd.DataFrame(data) + + json_dict = lg.location_group_df_to_json( + data=df, + group_id=TEST_GROUP_ID, + group_office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + category_id=TEST_CATEGORY_ID, + ) + assert TEST_GROUP_ID in json_dict["id"] + assert TEST_LOCATION_ID in json_dict["assigned-locations"][0]["location-id"] + + +def test_update_location_group(): + lg.update_location_group( + group_id=TEST_GROUP_ID, + office_id=TEST_OFFICE, + replace_assigned_locs=True, + data=LOC_GROUP_DATA_UPDATE, + ) + data = lg.get_location_group( + loc_group_id=TEST_GROUP_ID, + category_id=TEST_CATEGORY_ID, + office_id=TEST_OFFICE, + group_office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + ).json + assert TEST_LOCATION_ID in data["id"] + assert TEST_CATEGORY_ID in data["location-category"]["id"] + + + +def test_delete_location(): + locations.store_location(BASE_LOCATION_DATA) + locations.delete_location(location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE) + df_final = locations.get_locations( + office_id=TEST_OFFICE, location_ids=TEST_LOCATION_ID + ).df + assert df_final.empty or TEST_LOCATION_ID not in df_final.get("name", []) \ No newline at end of file From 93e8de7fc7eaad6aa0c74aa24eea9dbd4a12459d Mon Sep 17 00:00:00 2001 From: msweier Date: Mon, 28 Jul 2025 15:57:57 -0500 Subject: [PATCH 06/14] add location and ts group tests --- tests/cda/locations/location_groups_test.py | 38 ++-- .../cda/timeseries/timeseries_groups_test.py | 185 ++++++++++++++++++ 2 files changed, 202 insertions(+), 21 deletions(-) create mode 100644 tests/cda/timeseries/timeseries_groups_test.py diff --git a/tests/cda/locations/location_groups_test.py b/tests/cda/locations/location_groups_test.py index bfe8fb76..f4b44865 100644 --- a/tests/cda/locations/location_groups_test.py +++ b/tests/cda/locations/location_groups_test.py @@ -27,8 +27,6 @@ } - - TEST_CATEGORY_ID = "Test Category Name" TEST_CAT_DESCRIPT = "test cat description" TEST_GROUP_ID = "Test Group Name" @@ -44,7 +42,6 @@ "description": TEST_CAT_DESCRIPT, }, "description": TEST_GROUP_DESCRIPT, - } LOC_GROUP_DATA_UPDATE = { @@ -56,8 +53,7 @@ "description": TEST_CAT_DESCRIPT, }, "description": TEST_GROUP_DESCRIPT, - "assigned-locations": [{"location-id": TEST_LOCATION_ID, - "office-id": TEST_OFFICE}] + "assigned-locations": [{"location-id": TEST_LOCATION_ID, "office-id": TEST_OFFICE}], } @@ -82,27 +78,17 @@ def test_store_location_group(): group_office_id=TEST_OFFICE, category_office_id=TEST_OFFICE, ).json 
- assert TEST_LOCATION_ID in data["id"] assert TEST_CATEGORY_ID in data["location-category"]["id"] + def test_get_location_groups(): data = lg.get_location_groups( office_id=TEST_OFFICE, category_office_id=TEST_OFFICE, location_office_id=TEST_OFFICE, - location_category_like=TEST_CATEGORY_ID + location_category_like=TEST_CATEGORY_ID, ).json - assert TEST_CATEGORY_ID in data["location-category"]["id"] - - data = lg.get_location_group( - loc_group_id=TEST_GROUP_ID, - category_id=TEST_CATEGORY_ID, - office_id=TEST_OFFICE, - group_office_id=TEST_OFFICE, - category_office_id=TEST_OFFICE, - ).json - assert TEST_LOCATION_ID in data["id"] - assert TEST_CATEGORY_ID in data["location-category"]["id"] + assert TEST_CATEGORY_ID in data[0]["location-category"]["id"] def test_location_group_df_to_json(): @@ -145,9 +131,19 @@ def test_update_location_group(): group_office_id=TEST_OFFICE, category_office_id=TEST_OFFICE, ).json - assert TEST_LOCATION_ID in data["id"] - assert TEST_CATEGORY_ID in data["location-category"]["id"] + assert data["id"] == TEST_GROUP_ID + assert len(data["assigned-locations"]) == 1 + assert data["assigned-locations"][0]["location-id"] == TEST_LOCATION_ID + assert data["assigned-locations"][0]["office-id"] == TEST_OFFICE + +def test_delete_location_group(): + lg.delete_location_group( + group_id=TEST_GROUP_ID, + category_id=TEST_CATEGORY_ID, + office_id=TEST_OFFICE, + cascade_delete=True, + ) def test_delete_location(): @@ -156,4 +152,4 @@ def test_delete_location(): df_final = locations.get_locations( office_id=TEST_OFFICE, location_ids=TEST_LOCATION_ID ).df - assert df_final.empty or TEST_LOCATION_ID not in df_final.get("name", []) \ No newline at end of file + assert df_final.empty or TEST_LOCATION_ID not in df_final.get("name", []) diff --git a/tests/cda/timeseries/timeseries_groups_test.py b/tests/cda/timeseries/timeseries_groups_test.py new file mode 100644 index 00000000..413cc347 --- /dev/null +++ b/tests/cda/timeseries/timeseries_groups_test.py @@ -0,0 +1,185 @@ +import pytest +import pandas as pd +import datetime + +import cwms.locations.physical_locations as locations +import cwms.timeseries.timeseries as timeseries +import cwms.timeseries.timeseries_group as tg + +TEST_OFFICE = "SPK" +TEST_LOCATION_ID = "pytest_group-loc-123" +TEST_LATITUDE = 44.1 +TEST_LONGITUDE = -93.1 + + +BASE_LOCATION_DATA = { + "name": TEST_LOCATION_ID, + "office-id": TEST_OFFICE, + "latitude": TEST_LATITUDE, + "longitude": TEST_LONGITUDE, + "elevation": 250.0, + "horizontal-datum": "NAD83", + "vertical-datum": "NAVD88", + "location-type": "TESTING", + "public-name": "Test Location", + "long-name": "A pytest-generated location", + "timezone-name": "America/Los_Angeles", + "location-kind": "SITE", + "nation": "US", +} + +TEST_TSID = f"{TEST_LOCATION_ID}.Stage.Inst.15Minutes.0.test1" + +TS1_DATA = { + "name": TEST_TSID, + "units": "ft", + "office-id": TEST_OFFICE, + "values": [[1509654000000, 54.3, 0]], +} + +TS2_DATA = { + "name": f"{TEST_LOCATION_ID}.Stage.Inst.15Minutes.0.test1", + "office-id": TEST_OFFICE, + "values": [[1509654000000, 66.0, 0]], +} + + +TEST_CATEGORY_ID = "Test Category Name" +TEST_CAT_DESCRIPT = "test cat description" +TEST_GROUP_ID = "Test Group Name" +TEST_GROUP_DESCRIPT = "test group description" + + +TS_GROUP_DATA = { + "office-id": TEST_OFFICE, + "id": TEST_GROUP_ID, + "time-series-category": { + "office-id": TEST_OFFICE, + "id": TEST_CATEGORY_ID, + "description": TEST_CAT_DESCRIPT, + }, + "description": TEST_GROUP_DESCRIPT, +} + + +@pytest.fixture(autouse=True) +def 
init_session(request): + print("Initializing CWMS API session for locations operations test...") + + +def test_store_location(): + locations.store_location(BASE_LOCATION_DATA) + df = locations.get_location(location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE).df + assert TEST_LOCATION_ID in df["name"].values + + +def test_store_timeseries(): + timeseries.store_timeseries(TS1_DATA) + data = timeseries.get_timeseries(ts_id=TEST_TSID, office_id=TEST_OFFICE).json + assert TEST_TSID in data["name"] + + +def test_store_timeseries_group(): + + tg.store_timeseries_groups(data=TS_GROUP_DATA) + data = tg.get_timeseries_group( + group_id=TEST_GROUP_ID, + category_id=TEST_CATEGORY_ID, + office_id=TEST_OFFICE, + group_office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + ).json + assert TEST_CATEGORY_ID in data["time-series-category"]["id"] + + +def test_get_timeseries_groups(): + data = tg.get_timeseries_groups( + office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + timeseries_category_like=TEST_CATEGORY_ID, + ).json + assert TEST_CATEGORY_ID in data[0]["time-series-category"]["id"] + + +def test_timeseries_group_df_to_json(): + data = [] + data.append({"timeseries-id": TEST_TSID, "office-id": TEST_OFFICE, "alias": ""}) + + # Create the pandas DataFrame + df = pd.DataFrame(data) + + json_dict = tg.timeseries_group_df_to_json( + data=df, + group_id=TEST_GROUP_ID, + group_office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + category_id=TEST_CATEGORY_ID, + ) + assert TEST_GROUP_ID in json_dict["id"] + assert TEST_LOCATION_ID in json_dict["assigned-time-series"][0]["timeseries-id"] + + +def test_update_timeseries_groups(): + data = [] + data.append({"timeseries-id": TEST_TSID, "office-id": TEST_OFFICE, "alias": ""}) + + # Create the pandas DataFrame + df = pd.DataFrame(data) + + json_dict = tg.timeseries_group_df_to_json( + data=df, + group_id=TEST_GROUP_ID, + group_office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + category_id=TEST_CATEGORY_ID, + ) + tg.update_timeseries_groups( + group_id=TEST_GROUP_ID, + office_id=TEST_OFFICE, + replace_assigned_ts=True, + data=json_dict, + ) + data = tg.get_timeseries_group( + group_id=TEST_GROUP_ID, + category_id=TEST_CATEGORY_ID, + category_office_id=TEST_OFFICE, + ).json + assert data["id"] == TEST_GROUP_ID + assert len(data["assigned-time-series"]) == 1 + assert data["assigned-time-series"][0]["timeseries-id"] == TEST_TSID + assert data["assigned-time-series"][0]["office-id"] == TEST_OFFICE + + +def test_delete_timeseries_group(): + + # update with no timeseries in the group first + df = pd.DataFrame(columns=["timeseries-id", "office-id", "alias"]) + + json_dict = tg.timeseries_group_df_to_json( + data=df, + group_id=TEST_GROUP_ID, + group_office_id=TEST_OFFICE, + category_office_id=TEST_OFFICE, + category_id=TEST_CATEGORY_ID, + ) + tg.update_timeseries_groups( + group_id=TEST_GROUP_ID, + office_id=TEST_OFFICE, + replace_assigned_ts=True, + data=json_dict, + ) + + # delete the group + tg.delete_timeseries_group( + group_id=TEST_GROUP_ID, category_id=TEST_CATEGORY_ID, office_id=TEST_OFFICE + ) + + +def test_delete_location(): + locations.delete_location( + location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE, cascade_delete=True + ) + df_final = locations.get_locations( + office_id=TEST_OFFICE, location_ids=TEST_LOCATION_ID + ).df + assert df_final.empty or TEST_LOCATION_ID not in df_final.get("name", []) From b4bb252f2e804837eefcd3bcf8799eebbd0cc849 Mon Sep 17 00:00:00 2001 From: msweier Date: Mon, 28 Jul 2025 16:06:59 -0500 Subject: [PATCH 
07/14] fix import order --- tests/cda/timeseries/timeseries_groups_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/cda/timeseries/timeseries_groups_test.py b/tests/cda/timeseries/timeseries_groups_test.py index 413cc347..4003b987 100644 --- a/tests/cda/timeseries/timeseries_groups_test.py +++ b/tests/cda/timeseries/timeseries_groups_test.py @@ -1,6 +1,6 @@ -import pytest import pandas as pd -import datetime +import pytest + import cwms.locations.physical_locations as locations import cwms.timeseries.timeseries as timeseries From fa5b88e6740e66d0f281d9f058c46812f15bde1d Mon Sep 17 00:00:00 2001 From: msweier Date: Tue, 29 Jul 2025 09:21:00 -0500 Subject: [PATCH 08/14] add cascade delete and switch office --- tests/cda/locations/location_groups_test.py | 13 +++++++------ tests/cda/timeseries/timeseries_groups_test.py | 8 ++++---- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/cda/locations/location_groups_test.py b/tests/cda/locations/location_groups_test.py index f4b44865..4e21168b 100644 --- a/tests/cda/locations/location_groups_test.py +++ b/tests/cda/locations/location_groups_test.py @@ -1,13 +1,14 @@ -import pytest import pandas as pd +import pytest + import cwms.locations.physical_locations as locations import cwms.locations.location_groups as lg -TEST_OFFICE = "SPK" +TEST_OFFICE = "MVP" TEST_LOCATION_ID = "pytest_group-loc-123" -TEST_LATITUDE = 44.1 -TEST_LONGITUDE = -93.1 +TEST_LATITUDE = 45.1704758 +TEST_LONGITUDE = -92.8411439 BASE_LOCATION_DATA = { @@ -21,7 +22,7 @@ "location-type": "TESTING", "public-name": "Test Location", "long-name": "A pytest-generated location", - "timezone-name": "America/Los_Angeles", + "timezone-name": "America/Chicago", "location-kind": "SITE", "nation": "US", } @@ -148,7 +149,7 @@ def test_delete_location_group(): def test_delete_location(): locations.store_location(BASE_LOCATION_DATA) - locations.delete_location(location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE) + locations.delete_location(location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE, cascade_delete=True) df_final = locations.get_locations( office_id=TEST_OFFICE, location_ids=TEST_LOCATION_ID ).df diff --git a/tests/cda/timeseries/timeseries_groups_test.py b/tests/cda/timeseries/timeseries_groups_test.py index 4003b987..0ce4faf5 100644 --- a/tests/cda/timeseries/timeseries_groups_test.py +++ b/tests/cda/timeseries/timeseries_groups_test.py @@ -6,10 +6,10 @@ import cwms.timeseries.timeseries as timeseries import cwms.timeseries.timeseries_group as tg -TEST_OFFICE = "SPK" +TEST_OFFICE = "MVP" TEST_LOCATION_ID = "pytest_group-loc-123" -TEST_LATITUDE = 44.1 -TEST_LONGITUDE = -93.1 +TEST_LATITUDE = 45.1704758 +TEST_LONGITUDE = -92.8411439 BASE_LOCATION_DATA = { @@ -23,7 +23,7 @@ "location-type": "TESTING", "public-name": "Test Location", "long-name": "A pytest-generated location", - "timezone-name": "America/Los_Angeles", + "timezone-name": "America/Chicago", "location-kind": "SITE", "nation": "US", } From 238e1d71301bfc56f49c2e34304f79c06b2a7d32 Mon Sep 17 00:00:00 2001 From: msweier Date: Tue, 29 Jul 2025 09:27:44 -0500 Subject: [PATCH 09/14] fix formatting --- tests/cda/locations/location_groups_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/cda/locations/location_groups_test.py b/tests/cda/locations/location_groups_test.py index 4e21168b..aa9367c5 100644 --- a/tests/cda/locations/location_groups_test.py +++ b/tests/cda/locations/location_groups_test.py @@ -149,7 +149,9 @@ def 
test_delete_location_group(): def test_delete_location(): locations.store_location(BASE_LOCATION_DATA) - locations.delete_location(location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE, cascade_delete=True) + locations.delete_location( + location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE, cascade_delete=True + ) df_final = locations.get_locations( office_id=TEST_OFFICE, location_ids=TEST_LOCATION_ID ).df From 65d6bf106e8b12683f376a7374474bf7c918382f Mon Sep 17 00:00:00 2001 From: msweier Date: Tue, 29 Jul 2025 09:38:47 -0500 Subject: [PATCH 10/14] fix import order --- tests/cda/locations/location_groups_test.py | 4 +--- tests/cda/timeseries/timeseries_groups_test.py | 2 -- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/cda/locations/location_groups_test.py b/tests/cda/locations/location_groups_test.py index aa9367c5..5fd28a43 100644 --- a/tests/cda/locations/location_groups_test.py +++ b/tests/cda/locations/location_groups_test.py @@ -1,9 +1,7 @@ import pandas as pd import pytest - - -import cwms.locations.physical_locations as locations import cwms.locations.location_groups as lg +import cwms.locations.physical_locations as locations TEST_OFFICE = "MVP" TEST_LOCATION_ID = "pytest_group-loc-123" diff --git a/tests/cda/timeseries/timeseries_groups_test.py b/tests/cda/timeseries/timeseries_groups_test.py index 0ce4faf5..e677b52c 100644 --- a/tests/cda/timeseries/timeseries_groups_test.py +++ b/tests/cda/timeseries/timeseries_groups_test.py @@ -1,7 +1,5 @@ import pandas as pd import pytest - - import cwms.locations.physical_locations as locations import cwms.timeseries.timeseries as timeseries import cwms.timeseries.timeseries_group as tg From 68739e65889c41f90725f3e84fd5b67a3bbc8e89 Mon Sep 17 00:00:00 2001 From: msweier Date: Tue, 29 Jul 2025 09:57:41 -0500 Subject: [PATCH 11/14] change to base location --- tests/cda/locations/location_groups_test.py | 2 +- tests/cda/timeseries/timeseries_groups_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/cda/locations/location_groups_test.py b/tests/cda/locations/location_groups_test.py index 5fd28a43..db98ae76 100644 --- a/tests/cda/locations/location_groups_test.py +++ b/tests/cda/locations/location_groups_test.py @@ -4,7 +4,7 @@ import cwms.locations.physical_locations as locations TEST_OFFICE = "MVP" -TEST_LOCATION_ID = "pytest_group-loc-123" +TEST_LOCATION_ID = "pytest_group" TEST_LATITUDE = 45.1704758 TEST_LONGITUDE = -92.8411439 diff --git a/tests/cda/timeseries/timeseries_groups_test.py b/tests/cda/timeseries/timeseries_groups_test.py index e677b52c..9a868426 100644 --- a/tests/cda/timeseries/timeseries_groups_test.py +++ b/tests/cda/timeseries/timeseries_groups_test.py @@ -5,7 +5,7 @@ import cwms.timeseries.timeseries_group as tg TEST_OFFICE = "MVP" -TEST_LOCATION_ID = "pytest_group-loc-123" +TEST_LOCATION_ID = "pytest_group" TEST_LATITUDE = 45.1704758 TEST_LONGITUDE = -92.8411439 From ff2fc3794ea185937c8e34f099ebef602c199041 Mon Sep 17 00:00:00 2001 From: msweier Date: Tue, 29 Jul 2025 10:02:51 -0500 Subject: [PATCH 12/14] fix sort --- tests/cda/locations/location_groups_test.py | 1 + tests/cda/timeseries/timeseries_groups_test.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/cda/locations/location_groups_test.py b/tests/cda/locations/location_groups_test.py index db98ae76..78ff67f1 100644 --- a/tests/cda/locations/location_groups_test.py +++ b/tests/cda/locations/location_groups_test.py @@ -1,5 +1,6 @@ import pandas as pd import pytest + import cwms.locations.location_groups 
as lg import cwms.locations.physical_locations as locations diff --git a/tests/cda/timeseries/timeseries_groups_test.py b/tests/cda/timeseries/timeseries_groups_test.py index 9a868426..7f74d3f9 100644 --- a/tests/cda/timeseries/timeseries_groups_test.py +++ b/tests/cda/timeseries/timeseries_groups_test.py @@ -1,5 +1,6 @@ import pandas as pd import pytest + import cwms.locations.physical_locations as locations import cwms.timeseries.timeseries as timeseries import cwms.timeseries.timeseries_group as tg From a7327f8f3d784172f8b1a9135cd4d7b3d065c9a5 Mon Sep 17 00:00:00 2001 From: msweier Date: Tue, 5 Aug 2025 12:06:02 -0500 Subject: [PATCH 13/14] update ts group to use module for locs and ts --- .../cda/timeseries/timeseries_groups_test.py | 103 ++++++++---------- 1 file changed, 46 insertions(+), 57 deletions(-) diff --git a/tests/cda/timeseries/timeseries_groups_test.py b/tests/cda/timeseries/timeseries_groups_test.py index 7f74d3f9..65b6a279 100644 --- a/tests/cda/timeseries/timeseries_groups_test.py +++ b/tests/cda/timeseries/timeseries_groups_test.py @@ -1,48 +1,13 @@ import pandas as pd import pytest -import cwms.locations.physical_locations as locations -import cwms.timeseries.timeseries as timeseries +import cwms import cwms.timeseries.timeseries_group as tg TEST_OFFICE = "MVP" TEST_LOCATION_ID = "pytest_group" -TEST_LATITUDE = 45.1704758 -TEST_LONGITUDE = -92.8411439 - - -BASE_LOCATION_DATA = { - "name": TEST_LOCATION_ID, - "office-id": TEST_OFFICE, - "latitude": TEST_LATITUDE, - "longitude": TEST_LONGITUDE, - "elevation": 250.0, - "horizontal-datum": "NAD83", - "vertical-datum": "NAVD88", - "location-type": "TESTING", - "public-name": "Test Location", - "long-name": "A pytest-generated location", - "timezone-name": "America/Chicago", - "location-kind": "SITE", - "nation": "US", -} - TEST_TSID = f"{TEST_LOCATION_ID}.Stage.Inst.15Minutes.0.test1" -TS1_DATA = { - "name": TEST_TSID, - "units": "ft", - "office-id": TEST_OFFICE, - "values": [[1509654000000, 54.3, 0]], -} - -TS2_DATA = { - "name": f"{TEST_LOCATION_ID}.Stage.Inst.15Minutes.0.test1", - "office-id": TEST_OFFICE, - "values": [[1509654000000, 66.0, 0]], -} - - TEST_CATEGORY_ID = "Test Category Name" TEST_CAT_DESCRIPT = "test cat description" TEST_GROUP_ID = "Test Group Name" @@ -61,21 +26,55 @@ } -@pytest.fixture(autouse=True) -def init_session(request): - print("Initializing CWMS API session for locations operations test...") +# Setup and teardown fixture for test location +@pytest.fixture(scope="module", autouse=True) +def setup_data(): + + + TEST_LATITUDE = 45.1704758 + TEST_LONGITUDE = -92.8411439 + BASE_LOCATION_DATA = { + "name": TEST_LOCATION_ID, + "office-id": TEST_OFFICE, + "latitude": TEST_LATITUDE, + "longitude": TEST_LONGITUDE, + "elevation": 250.0, + "horizontal-datum": "NAD83", + "vertical-datum": "NAVD88", + "location-type": "TESTING", + "public-name": "Test Location", + "long-name": "A pytest-generated location", + "timezone-name": "America/Chicago", + "location-kind": "SITE", + "nation": "US", + } + + # Store location before tests + cwms.store_location(BASE_LOCATION_DATA) + + + + TS1_DATA = { + "name": TEST_TSID, + "units": "ft", + "office-id": TEST_OFFICE, + "values": [[1509654000000, 54.3, 0]], + } + # Store timeseries before tests + cwms.store_timeseries(TS1_DATA) -def test_store_location(): - locations.store_location(BASE_LOCATION_DATA) - df = locations.get_location(location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE).df - assert TEST_LOCATION_ID in df["name"].values + yield + + # Delete location and TS 
after tests + cwms.delete_location( + location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE, cascade_delete=True + ) -def test_store_timeseries(): - timeseries.store_timeseries(TS1_DATA) - data = timeseries.get_timeseries(ts_id=TEST_TSID, office_id=TEST_OFFICE).json - assert TEST_TSID in data["name"] +@pytest.fixture(autouse=True) +def init_session(request): + print("Initializing CWMS API session for locations operations test...") def test_store_timeseries_group(): @@ -172,13 +171,3 @@ def test_delete_timeseries_group(): tg.delete_timeseries_group( group_id=TEST_GROUP_ID, category_id=TEST_CATEGORY_ID, office_id=TEST_OFFICE ) - - -def test_delete_location(): - locations.delete_location( - location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE, cascade_delete=True - ) - df_final = locations.get_locations( - office_id=TEST_OFFICE, location_ids=TEST_LOCATION_ID - ).df - assert df_final.empty or TEST_LOCATION_ID not in df_final.get("name", []) From 6d9193d1f7da218e6fa402aecf4f245a06456e05 Mon Sep 17 00:00:00 2001 From: msweier Date: Tue, 5 Aug 2025 12:25:02 -0500 Subject: [PATCH 14/14] update loc group to use module for tests --- tests/cda/locations/location_groups_test.py | 72 +++++++++---------- .../cda/timeseries/timeseries_groups_test.py | 3 - 2 files changed, 35 insertions(+), 40 deletions(-) diff --git a/tests/cda/locations/location_groups_test.py b/tests/cda/locations/location_groups_test.py index 78ff67f1..1a804928 100644 --- a/tests/cda/locations/location_groups_test.py +++ b/tests/cda/locations/location_groups_test.py @@ -1,30 +1,11 @@ import pandas as pd import pytest +import cwms import cwms.locations.location_groups as lg -import cwms.locations.physical_locations as locations TEST_OFFICE = "MVP" TEST_LOCATION_ID = "pytest_group" -TEST_LATITUDE = 45.1704758 -TEST_LONGITUDE = -92.8411439 - - -BASE_LOCATION_DATA = { - "name": TEST_LOCATION_ID, - "office-id": TEST_OFFICE, - "latitude": TEST_LATITUDE, - "longitude": TEST_LONGITUDE, - "elevation": 250.0, - "horizontal-datum": "NAD83", - "vertical-datum": "NAVD88", - "location-type": "TESTING", - "public-name": "Test Location", - "long-name": "A pytest-generated location", - "timezone-name": "America/Chicago", - "location-kind": "SITE", - "nation": "US", -} TEST_CATEGORY_ID = "Test Category Name" @@ -57,17 +38,45 @@ } +# Setup and teardown fixture for test location +@pytest.fixture(scope="module", autouse=True) +def setup_data(): + + TEST_LATITUDE = 45.1704758 + TEST_LONGITUDE = -92.8411439 + + BASE_LOCATION_DATA = { + "name": TEST_LOCATION_ID, + "office-id": TEST_OFFICE, + "latitude": TEST_LATITUDE, + "longitude": TEST_LONGITUDE, + "elevation": 250.0, + "horizontal-datum": "NAD83", + "vertical-datum": "NAVD88", + "location-type": "TESTING", + "public-name": "Test Location", + "long-name": "A pytest-generated location", + "timezone-name": "America/Chicago", + "location-kind": "SITE", + "nation": "US", + } + + # Store location before tests + cwms.store_location(BASE_LOCATION_DATA) + + yield + + # Delete location and TS after tests + cwms.delete_location( + location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE, cascade_delete=True + ) + + @pytest.fixture(autouse=True) def init_session(request): print("Initializing CWMS API session for locations operations test...") -def test_store_location(): - locations.store_location(BASE_LOCATION_DATA) - df = locations.get_location(location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE).df - assert TEST_LOCATION_ID in df["name"].values - - def test_store_location_group(): 
lg.store_location_groups(data=LOC_GROUP_DATA) @@ -144,14 +153,3 @@ def test_delete_location_group(): office_id=TEST_OFFICE, cascade_delete=True, ) - - -def test_delete_location(): - locations.store_location(BASE_LOCATION_DATA) - locations.delete_location( - location_id=TEST_LOCATION_ID, office_id=TEST_OFFICE, cascade_delete=True - ) - df_final = locations.get_locations( - office_id=TEST_OFFICE, location_ids=TEST_LOCATION_ID - ).df - assert df_final.empty or TEST_LOCATION_ID not in df_final.get("name", []) diff --git a/tests/cda/timeseries/timeseries_groups_test.py b/tests/cda/timeseries/timeseries_groups_test.py index 65b6a279..66d18ddb 100644 --- a/tests/cda/timeseries/timeseries_groups_test.py +++ b/tests/cda/timeseries/timeseries_groups_test.py @@ -30,7 +30,6 @@ @pytest.fixture(scope="module", autouse=True) def setup_data(): - TEST_LATITUDE = 45.1704758 TEST_LONGITUDE = -92.8411439 @@ -53,8 +52,6 @@ def setup_data(): # Store location before tests cwms.store_location(BASE_LOCATION_DATA) - - TS1_DATA = { "name": TEST_TSID, "units": "ft",