diff --git a/microgridup.py b/microgridup.py
index 7a02e5e..f1ec402 100644
--- a/microgridup.py
+++ b/microgridup.py
@@ -53,8 +53,12 @@ def main(data, invalidate_cache=True, open_results=False):
 	assert 'threePhaseRelayCost' in data
 	assert 'REOPT_INPUTS' in data
 	assert isinstance(data['REOPT_INPUTS'], dict)
+	assert 'LOAD_GROWTH_PERCENT' in data and isinstance(data['LOAD_GROWTH_PERCENT'], (int, float))
+	assert 'LOAD_GROWTH_SPECIFIC' in data and isinstance(data['LOAD_GROWTH_SPECIFIC'], dict)
+	assert 'ADDITIONAL_LOADSHAPE_CSV' in data
+	assert 'ADDITIONAL_LOADSHAPE_METER' in data and isinstance(data['ADDITIONAL_LOADSHAPE_METER'], str)
 	# - jsCircuitModel is an optional key
-	assert len(data.keys()) == 12 or (len(data.keys()) == 13 and 'jsCircuitModel' in data)
+	assert len(data.keys()) in [15, 16] or (len(data.keys()) in [16, 17] and 'jsCircuitModel' in data)
 	assert isinstance(invalidate_cache, bool)
 	assert isinstance(open_results, bool)
 	# Quick check to ensure MODEL_DIR contains only lowercase alphanumeric and dashes. No spaces or underscores.
@@ -111,6 +115,8 @@ def main(data, invalidate_cache=True, open_results=False):
 		_copy_files_from_uploads_into_model_dir(immutable_data['LOAD_CSV'], f'{absolute_model_directory}/loads.csv', logger)
 	if immutable_data['OUTAGE_CSV'] is not None:
 		_copy_files_from_uploads_into_model_dir(immutable_data['OUTAGE_CSV'], f'{absolute_model_directory}/outages.csv', logger)
+	if immutable_data['ADDITIONAL_LOADSHAPE_CSV'] is not None:
+		_copy_files_from_uploads_into_model_dir(immutable_data['ADDITIONAL_LOADSHAPE_CSV'], f'{absolute_model_directory}/additional_loadshape.csv', logger)
 	os.system(f'touch "{absolute_model_directory}/0running.txt"')
 	try:
 		os.remove(f"{absolute_model_directory}/0crashed.txt")
@@ -118,6 +124,8 @@ def main(data, invalidate_cache=True, open_results=False):
 		pass
 	# Run the full MicrogridUP analysis.
 	try:
+		# Apply load growth to the loads.csv file
+		microgridup_design.apply_load_growth(immutable_data, logger)
 		# - Calculate hosting capacity for the initial circuit uploaded by the user or created via the GUI
 		microgridup_hosting_cap.run_hosting_capacity()
 		# - For each microgrid, use REOPT to calculate the optimal amount of new generation assets and to calculate generation power output
@@ -527,7 +535,7 @@ def get_immutable_dict(data):
 	'''
 	Get an immutable copy of the data. Functions later in the call stack shouldn't need to modify the data. They should only need to read pieces
 	of it to write some output. Working with an immutable dict is a way to maintain sanity as it gets passed around among all of our functions. This
-	function is recommened, but is not required (i.e. it could be commented-out and ignored completely)
+	function is recommended, but is not required (i.e. it could be commented-out and ignored completely)
 
 	:param data: all of the data we need to run our model
 	:type data: dict
diff --git a/microgridup_design.py b/microgridup_design.py
index bcecf37..6950aba 100644
--- a/microgridup_design.py
+++ b/microgridup_design.py
@@ -1,4 +1,5 @@
 import os, json, shutil, statistics, logging
+import tempfile
 from types import MappingProxyType
 from pathlib import Path
 import jinja2 as j2
@@ -826,6 +827,54 @@ def _create_production_factor_series_csv(data, logger, invalidate_cache):
 	shutil.rmtree('reopt_loadshapes')
 
 
+def apply_load_growth(immutable_data, logger):
+	'''
+	Apply load growth modifications to loads.csv based on user inputs.
+	'''
+	load_df = pd.read_csv('loads.csv')
+	# Ignore the timeseries column if it exists in loads.csv
+	n_rows = len(load_df)
+	expected_sum = n_rows * (n_rows + 1) // 2 # Sum of 1 to n_rows
+	if n_rows > 1 and load_df.iloc[:n_rows, 0].sum() == expected_sum:
+		load_df = load_df.iloc[:, 1:]
+		logger.info('Removed timeseries index column from loads.csv.')
+	# 1. Global growth: Multiply all loads by (1 + percent/100)
+	growth_percent = immutable_data.get('LOAD_GROWTH_PERCENT', 0.0)
+	if growth_percent != 0.0:
+		load_df.iloc[:, :] *= (1 + growth_percent / 100) # Apply to all columns (loads only, index removed if present)
+		logger.info(f'Applied {growth_percent}% global load growth.')
+	# 2. Specific meter growth: Multiply specific columns by their factors
+	growth_specific = immutable_data.get('LOAD_GROWTH_SPECIFIC', {})
+	for meter, percent in growth_specific.items():
+		if meter in load_df.columns:
+			load_df[meter] *= (1 + percent / 100)
+			logger.info(f"Applied {percent}% growth to meter '{meter}'.")
+		else:
+			logger.warning(f"Meter '{meter}' not found in loads.csv; skipping growth.")
+	# 3. Additional loadshape: Add values from CSV to a specific meter
+	loadshape_path = immutable_data.get('ADDITIONAL_LOADSHAPE_CSV')
+	meter = immutable_data.get('ADDITIONAL_LOADSHAPE_METER', '')
+	if loadshape_path and meter and meter in load_df.columns:
+		try:
+			# add_df = pd.read_csv(loadshape_path, header=None) # No header in additional loadshape CSV
+			add_df = pd.read_csv('additional_loadshape.csv', header=None)
+			if len(add_df.columns) == 1:
+				add_series = add_df.iloc[:, 0]
+				if len(add_series) == len(load_df):
+					load_df[meter] += add_series
+					logger.info(f'Added additional loadshape to meter "{meter}".')
+				else:
+					logger.error(f'Additional loadshape length ({len(add_series)}) does not match loads.csv ({len(load_df)}).')
+			else:
+				logger.error('Additional loadshape CSV must have exactly one column.')
+		except Exception as e:
+			logger.error(f'Error processing additional loadshape: {e}')
+	elif loadshape_path and meter:
+		logger.warning(f'Meter "{meter}" not found in loads.csv; skipping additional loadshape.')
+	# Save the modified DataFrame back to loads.csv
+	load_df.to_csv('loads.csv', index=False)
+
+
 def _tests():
 	# - Asssert that REopt's own tests pass
 	reopt_jl._test()
@@ -863,5 +912,85 @@ def _tests():
 	print('Ran all tests for microgridup_design.py.')
 
 
+def _test_apply_load_growth():
+	'''
+	Test the apply_load_growth function with various scenarios.
+	'''
+	# Set up logger
+	logger = microgridup.setup_logging('logs.log')
+	# Create temporary directory to avoid affecting real files
+	with tempfile.TemporaryDirectory() as temp_dir:
+		os.chdir(temp_dir)
+		# Create mock loads.csv (with timeseries index)
+		loads_data = {
+			'timeseries': list(range(1, 25)), # 24 hours for simplicity
+			'meter1': [10] * 24,
+			'meter2': [20] * 24,
+			'meter3': [30] * 24
+		}
+		loads_df = pd.DataFrame(loads_data)
+		loads_df.to_csv('loads.csv', index=False)
+		# Test 1: Global growth
+		mock_data = MappingProxyType({
+			'LOAD_GROWTH_PERCENT': 10.0, # 10% growth
+			'LOAD_GROWTH_SPECIFIC': {},
+			'ADDITIONAL_LOADSHAPE_CSV': None,
+			'ADDITIONAL_LOADSHAPE_METER': ''
+		})
+		apply_load_growth(mock_data, logger)
+		modified_df = pd.read_csv('loads.csv')
+		# Check that loads increased by 10% (index column should be removed)
+		assert modified_df['meter1'].iloc[0] == 11.0, f"Expected 11.0, got {modified_df['meter1'].iloc[0]}"
+		assert modified_df['meter2'].iloc[0] == 22.0, f"Expected 22.0, got {modified_df['meter2'].iloc[0]}"
+		print('Test 1 (Global growth): PASSED')
+		# Reset loads.csv for next test
+		loads_df.to_csv('loads.csv', index=False)
+		# Test 2: Specific meter growth
+		mock_data = MappingProxyType({
+			'LOAD_GROWTH_PERCENT': 0.0,
+			'LOAD_GROWTH_SPECIFIC': {'meter1': 5.0, 'meter2': 10.0}, # 5% to meter1, 10% to meter2
+			'ADDITIONAL_LOADSHAPE_CSV': None,
+			'ADDITIONAL_LOADSHAPE_METER': ''
+		})
+		apply_load_growth(mock_data, logger)
+		modified_df = pd.read_csv('loads.csv')
+		assert modified_df['meter1'].iloc[0] == 10.5, f"Expected 10.5, got {modified_df['meter1'].iloc[0]}"
+		assert modified_df['meter2'].iloc[0] == 22.0, f"Expected 22.0, got {modified_df['meter2'].iloc[0]}"
+		assert modified_df['meter3'].iloc[0] == 30.0, f"Expected 30.0, got {modified_df['meter3'].iloc[0]}" # No change
+		print('Test 2 (Specific meter growth): PASSED')
+		# Reset loads.csv for next test
+		loads_df.to_csv('loads.csv', index=False)
+		# Test 3: Additional loadshape
+		# Create mock additional_loadshape.csv (single headerless column, as apply_load_growth expects)
+		additional_data = {'loadshape': [1] * 24} # Add 1 kW to each hour
+		additional_df = pd.DataFrame(additional_data)
+		additional_df.to_csv('additional_loadshape.csv', index=False, header=False)
+		mock_data = MappingProxyType({
+			'LOAD_GROWTH_PERCENT': 0.0,
+			'LOAD_GROWTH_SPECIFIC': {},
+			'ADDITIONAL_LOADSHAPE_CSV': 'additional_loadshape.csv', # Present
+			'ADDITIONAL_LOADSHAPE_METER': 'meter1'
+		})
+		apply_load_growth(mock_data, logger)
+		modified_df = pd.read_csv('loads.csv')
+		assert modified_df['meter1'].iloc[0] == 11.0, f"Expected 11.0, got {modified_df['meter1'].iloc[0]}" # 10 + 1
+		assert modified_df['meter2'].iloc[0] == 20.0, f"Expected 20.0, got {modified_df['meter2'].iloc[0]}" # No change
+		print('Test 3 (Additional loadshape): PASSED')
+		# Test 4: No changes (defaults)
+		loads_df.to_csv('loads.csv', index=False)
+		mock_data = MappingProxyType({
+			'LOAD_GROWTH_PERCENT': 0.0,
+			'LOAD_GROWTH_SPECIFIC': {},
+			'ADDITIONAL_LOADSHAPE_CSV': None,
+			'ADDITIONAL_LOADSHAPE_METER': ''
+		})
+		apply_load_growth(mock_data, logger)
+		modified_df = pd.read_csv('loads.csv')
+		assert modified_df['meter1'].iloc[0] == 10.0, f"Expected 10.0, got {modified_df['meter1'].iloc[0]}"
+		print('Test 4 (No changes): PASSED')
+		print('All apply_load_growth tests passed')
+
+
 if __name__ == '__main__':
-	_tests()
\ No newline at end of file
+	_tests()
+	# _test_apply_load_growth()
\ No newline at end of file
diff --git a/microgridup_gui.py b/microgridup_gui.py
index 0c1aa7e..10eea3a 100644
--- a/microgridup_gui.py
+++ b/microgridup_gui.py
@@ -553,6 +553,11 @@
 	data['OUTAGE_CSV'] = _get_uploaded_file_filepath(absolute_model_directory, 'outages.csv', f'{microgridup.MGU_DIR}/uploads/HISTORICAL_OUTAGES_{model_name}', request, 'HISTORICAL_OUTAGES', 'OUTAGES_PATH')
 	data['LOAD_CSV'] = _get_uploaded_file_filepath(absolute_model_directory, 'loads.csv', f'{microgridup.MGU_DIR}/uploads/LOAD_CSV_{model_name}', request, 'LOAD_CSV', 'LOAD_CSV_NAME')
 	data['BASE_DSS'] = _get_uploaded_file_filepath(absolute_model_directory, 'circuit.dss', f'{microgridup.MGU_DIR}/uploads/BASE_DSS_{model_name}', request, None, 'DSS_PATH')
+	data['ADDITIONAL_LOADSHAPE_CSV'] = _get_uploaded_file_filepath(absolute_model_directory, 'additional_loadshape.csv', f'{microgridup.MGU_DIR}/uploads/ADDITIONAL_LOADSHAPE_{model_name}', request, 'ADDITIONAL_LOADSHAPE', 'ADDITIONAL_LOADSHAPE_NAME')
+	data['ADDITIONAL_LOADSHAPE_METER'] = request.form.get('ADDITIONAL_LOADSHAPE_METER', '') # Meter name from form
+	# Clean up like LOAD_CSV
+	if 'ADDITIONAL_LOADSHAPE_NAME' in data:
+		del data['ADDITIONAL_LOADSHAPE_NAME']
 	# - Delete form keys that are not currently used past this point
 	del data['DSS_PATH']
 	del data['OUTAGES_PATH']
@@ -563,6 +568,8 @@
 	# - Format the REopt inputs into the schema we want. This formatting needs to be done here (and not in microgridup.main) because otherwise
 	# invocations of microgridup.main() would require REopt keys to be at the top level of the user's input dict, which would be really annoying
 	data['REOPT_INPUTS'] = _get_reopt_inputs(data)
+	data['LOAD_GROWTH_PERCENT'] = float(request.form.get('LOAD_GROWTH_PERCENT', 0.0)) # Default to 0% (no growth)
+	data['LOAD_GROWTH_SPECIFIC'] = json.loads(request.form.get('LOAD_GROWTH_SPECIFIC') or '{}') # Default to empty dict
 	# - Format relevant properties for _get_microgrids()
 	data['CRITICAL_LOADS'] = json.loads(data['CRITICAL_LOADS'])
 	if len(data['CRITICAL_LOADS']) == 0:
diff --git a/templates/template_new.html b/templates/template_new.html
index 26ced0a..9d2ab2f 100644
--- a/templates/template_new.html
+++ b/templates/template_new.html
@@ -293,6 +293,25 @@
 Load Profile
 Optional inputs
+ [19 added lines of form markup for the Load Growth (%), Specific Load Growth (JSON), Additional Loadshape CSV upload, and Additional Loadshape Meter inputs; the HTML tags were not recoverable from this capture]
 Resilience
@@ -1756,6 +1775,65 @@
 
 Step 5 (Optional): Override technology parameters per-microgrid
 
 	};
 	window.disableInputs();
 	{% endif %}
+
+	// - Load Profile Validation
+	const loadProfileCsvLoadParser = new CsvLoadParser();
+	// - Create an observer for the circuit builder's csvLoadParser to sync meter names
+	class LoadProfileValidator {
+		handleParsedCsv() {
+			const loads = window.csvLoadParser.getLoads();
+			if (loads) {
+				const meterNames = loads.map(load => load.getProperty('name'));
+				populateMeterDropdown(meterNames);
+			}
+		}
+	}
+	const loadProfileValidator = new LoadProfileValidator();
+	window.csvLoadParser.addObserver(loadProfileValidator);
+
+	function populateMeterDropdown(meterNames) {
+		const select = document.getElementById('additionalLoadshapeMeterSelect');
+		if (!select) {
+			console.error('additionalLoadshapeMeterSelect not found');
+			return;
+		}
+		// Clear existing options except the first
+		while (select.options.length > 1) {
+			select.remove(1);
+		}
+		meterNames.forEach(name => {
+			const option = document.createElement('option');
+			option.value = name;
+			option.textContent = name;
+			select.appendChild(option);
+		});
+	}
+
+	// - Override submitEverything to add validation
+	const originalSubmitEverything = submitEverything;
+	submitEverything = async function() {
+		// Validate LOAD_GROWTH_SPECIFIC
+		const growthSpecificTextarea = document.querySelector('[name="LOAD_GROWTH_SPECIFIC"]');
+		if (growthSpecificTextarea && growthSpecificTextarea.value.trim()) {
+			try {
+				const growthSpecific = JSON.parse(growthSpecificTextarea.value);
+				const loads = window.csvLoadParser.getLoads();
+				const validMeterNames = new Set(loads.map(load => load.getProperty('name')));
+				for (const meter in growthSpecific) {
+					if (!validMeterNames.has(meter)) {
+						alert(`Invalid meter name "${meter}" in Specific Load Growth. Please select from available loads.`);
+						return;
+					}
+				}
+			} catch (e) {
+				alert('Invalid JSON in Specific Load Growth. Please check the format.');
+				return;
+			}
+		}
+
+		// Call original submitEverything
+		await originalSubmitEverything();
+	};
\ No newline at end of file
diff --git a/tests_backend_full.py b/tests_backend_full.py
index 79ebe3f..c93fd89 100644
--- a/tests_backend_full.py
+++ b/tests_backend_full.py
@@ -115,6 +115,10 @@ def test_1mg():
 		'DESCRIPTION': '',
 		'singlePhaseRelayCost': 300.0,
 		'threePhaseRelayCost': 20000.0,
+		'LOAD_GROWTH_PERCENT': 0.0,
+		'LOAD_GROWTH_SPECIFIC': {},
+		'ADDITIONAL_LOADSHAPE_CSV': None,
+		'ADDITIONAL_LOADSHAPE_METER': '',
 	}
 	# Run model.
 	microgridup.main(data, invalidate_cache=False, open_results=True)
@@ -223,6 +227,10 @@ def test_2mg():
 		'DESCRIPTION': '',
 		'singlePhaseRelayCost': 300.0,
 		'threePhaseRelayCost': 20000.0,
+		'LOAD_GROWTH_PERCENT': 0.0,
+		'LOAD_GROWTH_SPECIFIC': {},
+		'ADDITIONAL_LOADSHAPE_CSV': None,
+		'ADDITIONAL_LOADSHAPE_METER': '',
 	}
 	# Run model.
 	microgridup.main(data, invalidate_cache=False, open_results=True)
@@ -347,7 +355,11 @@ def test_3mg():
 		],
 		'DESCRIPTION': '',
 		'singlePhaseRelayCost': 300.0,
-		'threePhaseRelayCost': 20000.0
+		'threePhaseRelayCost': 20000.0,
+		'LOAD_GROWTH_PERCENT': 0.0,
+		'LOAD_GROWTH_SPECIFIC': {},
+		'ADDITIONAL_LOADSHAPE_CSV': None,
+		'ADDITIONAL_LOADSHAPE_METER': ''
 	}
 	# Run model.
 	microgridup.main(data, invalidate_cache=False, open_results=True)
@@ -492,7 +504,11 @@ def test_4mg():
 		],
 		'DESCRIPTION': '',
 		'singlePhaseRelayCost': 300.0,
-		'threePhaseRelayCost': 20000.0
+		'threePhaseRelayCost': 20000.0,
+		'LOAD_GROWTH_PERCENT': 0.0,
+		'LOAD_GROWTH_SPECIFIC': {},
+		'ADDITIONAL_LOADSHAPE_CSV': None,
+		'ADDITIONAL_LOADSHAPE_METER': ''
 	}
 	# Run model.
 	microgridup.main(data, invalidate_cache=False, open_results=True)
@@ -586,7 +602,11 @@ def test_auto3mg():
 		],
 		'DESCRIPTION': '',
 		'singlePhaseRelayCost': 300.0,
-		'threePhaseRelayCost': 20000.0
+		'threePhaseRelayCost': 20000.0,
+		'LOAD_GROWTH_PERCENT': 0.0,
+		'LOAD_GROWTH_SPECIFIC': {},
+		'ADDITIONAL_LOADSHAPE_CSV': None,
+		'ADDITIONAL_LOADSHAPE_METER': ''
 	}
 	# Run model.
 	microgridup.main(data, invalidate_cache=False, open_results=True)
@@ -1008,7 +1028,11 @@ def test_mackelroy():
 		],
 		"DESCRIPTION": "",
 		"singlePhaseRelayCost": 300.0,
-		"threePhaseRelayCost": 20000.0
+		"threePhaseRelayCost": 20000.0,
+		"LOAD_GROWTH_PERCENT": 0.0,
+		"LOAD_GROWTH_SPECIFIC": {},
+		"ADDITIONAL_LOADSHAPE_CSV": None,
+		"ADDITIONAL_LOADSHAPE_METER": ""
 	}
 	# Run model.
 	microgridup.main(data, invalidate_cache=False, open_results=True)
@@ -1016,9 +1040,9 @@ def test_mackelroy():
 		sys.exit(1)
 
 if __name__ == '__main__':
-	test_1mg()
-	test_2mg()
-	test_3mg()
-	test_4mg()
-	test_auto3mg()
+	# test_1mg()
+	# test_2mg()
+	# test_3mg()
+	# test_4mg()
+	# test_auto3mg()
 	test_mackelroy()
\ No newline at end of file
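For reference, a minimal sketch of how the four new keys drive the load growth step, assuming hypothetical meter names (meter1, meter2) and toy 24-value CSVs rather than full 8760-hour loadshapes; it mirrors the defaults used in the tests above and is illustrative only, not part of the patch:

	import pandas as pd
	import microgridup, microgridup_design

	logger = microgridup.setup_logging('logs.log')
	# loads.csv has one column per meter; the additional loadshape CSV is a single headerless column.
	pd.DataFrame({'meter1': [10.0] * 24, 'meter2': [20.0] * 24}).to_csv('loads.csv', index=False)
	pd.DataFrame([1.0] * 24).to_csv('additional_loadshape.csv', index=False, header=False)
	data = {
		'LOAD_GROWTH_PERCENT': 2.0,  # global growth applied to every meter
		'LOAD_GROWTH_SPECIFIC': {'meter2': 15.0},  # per-meter growth, entered as JSON in the GUI
		'ADDITIONAL_LOADSHAPE_CSV': 'additional_loadshape.csv',
		'ADDITIONAL_LOADSHAPE_METER': 'meter1',  # meter that receives the added loadshape
	}
	microgridup_design.apply_load_growth(data, logger)  # rewrites loads.csv in place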