Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 10 additions & 2 deletions microgridup.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,12 @@ def main(data, invalidate_cache=True, open_results=False):
assert 'threePhaseRelayCost' in data
assert 'REOPT_INPUTS' in data
assert isinstance(data['REOPT_INPUTS'], dict)
assert 'LOAD_GROWTH_PERCENT' in data and isinstance(data['LOAD_GROWTH_PERCENT'], (int, float))
assert 'LOAD_GROWTH_SPECIFIC' in data and isinstance(data['LOAD_GROWTH_SPECIFIC'], dict)
assert 'ADDITIONAL_LOADSHAPE_CSV' in data
assert 'ADDITIONAL_LOADSHAPE_METER' in data and isinstance(data['ADDITIONAL_LOADSHAPE_METER'], str)
# - jsCircuitModel is an optional key
assert len(data.keys()) == 12 or (len(data.keys()) == 13 and 'jsCircuitModel' in data)
assert len(data.keys()) in [15, 16] or (len(data.keys()) in [16, 17] and 'jsCircuitModel' in data)
assert isinstance(invalidate_cache, bool)
assert isinstance(open_results, bool)
# Quick check to ensure MODEL_DIR contains only lowercase alphanumeric and dashes. No spaces or underscores.
Expand Down Expand Up @@ -111,13 +115,17 @@ def main(data, invalidate_cache=True, open_results=False):
_copy_files_from_uploads_into_model_dir(immutable_data['LOAD_CSV'], f'{absolute_model_directory}/loads.csv', logger)
if immutable_data['OUTAGE_CSV'] is not None:
_copy_files_from_uploads_into_model_dir(immutable_data['OUTAGE_CSV'], f'{absolute_model_directory}/outages.csv', logger)
if immutable_data['ADDITIONAL_LOADSHAPE_CSV'] is not None:
_copy_files_from_uploads_into_model_dir(immutable_data['ADDITIONAL_LOADSHAPE_CSV'], f'{absolute_model_directory}/additional_loadshape.csv', logger)
os.system(f'touch "{absolute_model_directory}/0running.txt"')
try:
os.remove(f"{absolute_model_directory}/0crashed.txt")
except FileNotFoundError:
pass
# Run the full MicrogridUP analysis.
try:
# Apply load growth to the loads.csv file
microgridup_design.apply_load_growth(immutable_data, logger)
# - Calculate hosting capacity for the initial circuit uploaded by the user or created via the GUI
microgridup_hosting_cap.run_hosting_capacity()
# - For each microgrid, use REOPT to calculate the optimal amount of new generation assets and to calculate generation power output
Expand Down Expand Up @@ -527,7 +535,7 @@ def get_immutable_dict(data):
'''
Get an immutable copy of the data. Functions later in the call stack shouldn't need to modify the data. They should only need to read pieces of it
to write some output. Working with an immutable dict is a way to maintain sanity as it gets passed around among all of our functions. This
function is recommened, but is not required (i.e. it could be commented-out and ignored completely)
function is recommended, but is not required (i.e. it could be commented-out and ignored completely)

:param data: all of the data we need to run our model
:type data: dict
Expand Down
131 changes: 130 additions & 1 deletion microgridup_design.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os, json, shutil, statistics, logging
import tempfile
from types import MappingProxyType
from pathlib import Path
import jinja2 as j2
Expand Down Expand Up @@ -826,6 +827,54 @@ def _create_production_factor_series_csv(data, logger, invalidate_cache):
shutil.rmtree('reopt_loadshapes')


def apply_load_growth(immutable_data, logger):
    '''
    Apply load growth modifications to loads.csv in the current working directory.

    Three independent adjustments, applied in order:
      1. Global growth: every load column is scaled by (1 + LOAD_GROWTH_PERCENT / 100).
      2. Specific growth: each meter named in LOAD_GROWTH_SPECIFIC is scaled by its own percentage.
      3. Additional loadshape: a single-column, headerless CSV is added element-wise to
         ADDITIONAL_LOADSHAPE_METER. The file is read from ./additional_loadshape.csv because
         the upload is copied into the model directory under that fixed name.

    :param immutable_data: model inputs; only the LOAD_GROWTH_* and ADDITIONAL_LOADSHAPE_* keys are read
    :type immutable_data: dict (or MappingProxyType)
    :param logger: logger used to report what was (or was not) applied
    :type logger: logging.Logger
    :return: None. loads.csv is rewritten in place.
    '''
    load_df = pd.read_csv('loads.csv')
    # Drop a leading timeseries index column (1..n) if one exists. Compare the actual
    # values, not just their sum: a sum-only check (n*(n+1)/2) can be fooled by a real
    # load column that happens to total the same amount.
    n_rows = len(load_df)
    if n_rows > 1 and load_df.iloc[:, 0].tolist() == list(range(1, n_rows + 1)):
        load_df = load_df.iloc[:, 1:]
        logger.info('Removed timeseries index column from loads.csv.')
    # 1. Global growth: multiply all load columns by (1 + percent/100).
    growth_percent = immutable_data.get('LOAD_GROWTH_PERCENT', 0.0)
    if growth_percent != 0.0:
        load_df.iloc[:, :] *= (1 + growth_percent / 100)  # Apply to all columns (loads only, index removed if present)
        logger.info(f'Applied {growth_percent}% global load growth.')
    # 2. Specific meter growth: multiply named columns by their own factors.
    growth_specific = immutable_data.get('LOAD_GROWTH_SPECIFIC', {})
    for meter, percent in growth_specific.items():
        if meter in load_df.columns:
            load_df[meter] *= (1 + percent / 100)
            logger.info(f"Applied {percent}% growth to meter '{meter}'.")
        else:
            logger.warning(f"Meter '{meter}' not found in loads.csv; skipping growth.")
    # 3. Additional loadshape: add values from the copied CSV to one meter.
    loadshape_path = immutable_data.get('ADDITIONAL_LOADSHAPE_CSV')
    meter = immutable_data.get('ADDITIONAL_LOADSHAPE_METER', '')
    if loadshape_path and meter and meter in load_df.columns:
        try:
            # The upload was copied into the model dir as additional_loadshape.csv.
            # No header row is expected in the additional loadshape CSV.
            add_df = pd.read_csv('additional_loadshape.csv', header=None)
            if len(add_df.columns) == 1:
                add_series = add_df.iloc[:, 0]
                if len(add_series) == len(load_df):
                    load_df[meter] += add_series
                    logger.info(f'Added additional loadshape to meter "{meter}".')
                else:
                    logger.error(f'Additional loadshape length ({len(add_series)}) does not match loads.csv ({len(load_df)}).')
            else:
                logger.error('Additional loadshape CSV must have exactly one column.')
        except Exception as e:
            logger.error(f'Error processing additional loadshape: {e}')
    elif loadshape_path and meter:
        logger.warning(f'Meter "{meter}" not found in loads.csv; skipping additional loadshape.')
    # Persist the modified loads back to loads.csv.
    load_df.to_csv('loads.csv', index=False)


def _tests():
# - Assert that REopt's own tests pass
reopt_jl._test()
Expand Down Expand Up @@ -863,5 +912,85 @@ def _tests():
print('Ran all tests for microgridup_design.py.')


def _test_apply_load_growth():
    '''
    Test the apply_load_growth function with various scenarios:
    global growth, per-meter growth, an additional loadshape, and the no-op default.

    Runs inside a temporary directory so the project's real loads.csv is untouched,
    and restores the original working directory afterwards.
    '''
    # Set up logger
    logger = microgridup.setup_logging('logs.log')
    original_cwd = os.getcwd()
    # Create temporary directory to avoid affecting real files
    with tempfile.TemporaryDirectory() as temp_dir:
        try:
            os.chdir(temp_dir)
            # Create mock loads.csv (with timeseries index)
            loads_data = {
                'timeseries': list(range(1, 25)),  # 24 hours for simplicity
                'meter1': [10] * 24,
                'meter2': [20] * 24,
                'meter3': [30] * 24
            }
            loads_df = pd.DataFrame(loads_data)
            loads_df.to_csv('loads.csv', index=False)
            # Test 1: Global growth
            mock_data = MappingProxyType({
                'LOAD_GROWTH_PERCENT': 10.0,  # 10% growth
                'LOAD_GROWTH_SPECIFIC': {},
                'ADDITIONAL_LOADSHAPE_CSV': None,
                'ADDITIONAL_LOADSHAPE_METER': ''
            })
            apply_load_growth(mock_data, logger)
            modified_df = pd.read_csv('loads.csv')
            # Check that loads increased by 10% (index column should be removed)
            assert modified_df['meter1'].iloc[0] == 11.0, f"Expected 11.0, got {modified_df['meter1'].iloc[0]}"
            assert modified_df['meter2'].iloc[0] == 22.0, f"Expected 22.0, got {modified_df['meter2'].iloc[0]}"
            print('Test 1 (Global growth): PASSED')
            # Reset loads.csv for next test
            loads_df.to_csv('loads.csv', index=False)
            # Test 2: Specific meter growth
            mock_data = MappingProxyType({
                'LOAD_GROWTH_PERCENT': 0.0,
                'LOAD_GROWTH_SPECIFIC': {'meter1': 5.0, 'meter2': 10.0},  # 5% to meter1, 10% to meter2
                'ADDITIONAL_LOADSHAPE_CSV': None,
                'ADDITIONAL_LOADSHAPE_METER': ''
            })
            apply_load_growth(mock_data, logger)
            modified_df = pd.read_csv('loads.csv')
            assert modified_df['meter1'].iloc[0] == 10.5, f"Expected 10.5, got {modified_df['meter1'].iloc[0]}"
            assert modified_df['meter2'].iloc[0] == 22.0, f"Expected 22.0, got {modified_df['meter2'].iloc[0]}"
            assert modified_df['meter3'].iloc[0] == 30.0, f"Expected 30.0, got {modified_df['meter3'].iloc[0]}"  # No change
            print('Test 2 (Specific meter growth): PASSED')
            # Reset loads.csv for next test
            loads_df.to_csv('loads.csv', index=False)
            # Test 3: Additional loadshape
            # Create mock additional_loadshape.csv. apply_load_growth reads it with
            # header=None, so it must be written WITHOUT a header row — otherwise the
            # header becomes a data row, the lengths mismatch, and no load is added.
            additional_df = pd.DataFrame({'loadshape': [1] * 24})  # Add 1 kW to each hour
            additional_df.to_csv('additional_loadshape.csv', index=False, header=False)
            mock_data = MappingProxyType({
                'LOAD_GROWTH_PERCENT': 0.0,
                'LOAD_GROWTH_SPECIFIC': {},
                'ADDITIONAL_LOADSHAPE_CSV': 'additional_loadshape.csv',  # Present
                'ADDITIONAL_LOADSHAPE_METER': 'meter1'
            })
            apply_load_growth(mock_data, logger)
            modified_df = pd.read_csv('loads.csv')
            assert modified_df['meter1'].iloc[0] == 11.0, f"Expected 11.0, got {modified_df['meter1'].iloc[0]}"  # 10 + 1
            assert modified_df['meter2'].iloc[0] == 20.0, f"Expected 20.0, got {modified_df['meter2'].iloc[0]}"  # No change
            print('Test 3 (Additional loadshape): PASSED')
            # Test 4: No changes (defaults)
            loads_df.to_csv('loads.csv', index=False)
            mock_data = MappingProxyType({
                'LOAD_GROWTH_PERCENT': 0.0,
                'LOAD_GROWTH_SPECIFIC': {},
                'ADDITIONAL_LOADSHAPE_CSV': None,
                'ADDITIONAL_LOADSHAPE_METER': ''
            })
            apply_load_growth(mock_data, logger)
            modified_df = pd.read_csv('loads.csv')
            assert modified_df['meter1'].iloc[0] == 10.0, f"Expected 10.0, got {modified_df['meter1'].iloc[0]}"
            print('Test 4 (No changes): PASSED')
            print('All apply_load_growth tests passed')
        finally:
            # Always restore the original working directory so later code (and the
            # TemporaryDirectory cleanup on Windows) is not left inside the temp dir.
            os.chdir(original_cwd)


if __name__ == '__main__':
    _tests()
    # TODO(review): enable once _test_apply_load_growth is part of the standard suite.
    # _test_apply_load_growth()
7 changes: 7 additions & 0 deletions microgridup_gui.py
Original file line number Diff line number Diff line change
Expand Up @@ -553,6 +553,11 @@ def run():
data['OUTAGE_CSV'] = _get_uploaded_file_filepath(absolute_model_directory, 'outages.csv', f'{microgridup.MGU_DIR}/uploads/HISTORICAL_OUTAGES_{model_name}', request, 'HISTORICAL_OUTAGES', 'OUTAGES_PATH')
data['LOAD_CSV'] = _get_uploaded_file_filepath(absolute_model_directory, 'loads.csv', f'{microgridup.MGU_DIR}/uploads/LOAD_CSV_{model_name}', request, 'LOAD_CSV', 'LOAD_CSV_NAME')
data['BASE_DSS'] = _get_uploaded_file_filepath(absolute_model_directory, 'circuit.dss', f'{microgridup.MGU_DIR}/uploads/BASE_DSS_{model_name}', request, None, 'DSS_PATH')
data['ADDITIONAL_LOADSHAPE_CSV'] = _get_uploaded_file_filepath(absolute_model_directory, 'additional_loadshape.csv', f'{microgridup.MGU_DIR}/uploads/ADDITIONAL_LOADSHAPE_{model_name}', request, 'ADDITIONAL_LOADSHAPE', 'ADDITIONAL_LOADSHAPE_NAME')
data['ADDITIONAL_LOADSHAPE_METER'] = request.form.get('ADDITIONAL_LOADSHAPE_METER', '') # Meter name from form
# Clean up like LOAD_CSV
if 'ADDITIONAL_LOADSHAPE_NAME' in data:
del data['ADDITIONAL_LOADSHAPE_NAME']
# - Delete form keys that are not currently used past this point
del data['DSS_PATH']
del data['OUTAGES_PATH']
Expand All @@ -563,6 +568,8 @@ def run():
# - Format the REopt inputs into the schema we want. This formatting needs to be done here (and not in microgridup.main) because otherwise
# invocations of microgridup.main() would require REopt keys to be at the top level of the user's input dict, which would be really annoying
data['REOPT_INPUTS'] = _get_reopt_inputs(data)
data['LOAD_GROWTH_PERCENT'] = float(request.form.get('LOAD_GROWTH_PERCENT', 0.0)) # Default to 0% (no growth)
data['LOAD_GROWTH_SPECIFIC'] = json.loads(request.form.get('LOAD_GROWTH_SPECIFIC') or '{}') # Default to empty dict
# - Format relevant properties for _get_microgrids()
data['CRITICAL_LOADS'] = json.loads(data['CRITICAL_LOADS'])
if len(data['CRITICAL_LOADS']) == 0:
Expand Down
78 changes: 78 additions & 0 deletions templates/template_new.html
Original file line number Diff line number Diff line change
Expand Up @@ -293,6 +293,25 @@ <h5>Load Profile</h5>
<a href="javascript:void(0)" class="toggle show">Optional inputs</a>
<div style="display:none">
<div id="year"></div>
<div class="chunk">
<!-- <label for="LOAD_GROWTH_PERCENT">Global Load Growth (%)</label> -->
<label class="tooltip" for="LOAD_GROWTH_PERCENT">Global Load Growth (%)<span class="classic">Input a whole number percentage to increase all loads by. increased_load = original_load * (1 + load_growth_percentage / 100).</span></label>
<input type="number" name="LOAD_GROWTH_PERCENT" value="0" step="1" min="0" max="500">
</div>
<div class="chunk">
<label class="tooltip" for="LOAD_GROWTH_SPECIFIC">Specific Load Growth (JSON format)<span class="classic">Input a JSON object with meter names and whole number percentages to increase specific loads. Example: {"meter1": 10, "meter2": 5} increases meter1 by 10% and meter2 by 5%.</span></label>
<textarea name="LOAD_GROWTH_SPECIFIC" placeholder='{"meter1": 10, "meter2": 5}' rows="1" cols="30"></textarea>
</div>
<div class="chunk">
<label class="tooltip" for="ADDITIONAL_LOADSHAPE">Additional Loadshape (.csv)<span class="classic">Upload a CSV file with a single column of values to add to a specific meter's load profile. Must have the same number of rows as your load data (typically 8760 for one year of hourly data).</span></label>
<input type="file" name="ADDITIONAL_LOADSHAPE" accept=".csv">
</div>
<div class="chunk">
<label class="tooltip" for="ADDITIONAL_LOADSHAPE_METER">Meter for Additional Loadshape<span class="classic">Select which meter to apply the additional loadshape to. The dropdown populates automatically based on the meters in your load CSV.</span></label>
<select name="ADDITIONAL_LOADSHAPE_METER" id="additionalLoadshapeMeterSelect">
<option value="">Select a meter</option>
</select>
</div>
</div>
</div>
<h5>Resilience</h5>
Expand Down Expand Up @@ -1756,6 +1775,65 @@ <h3>Step 5 (Optional): Override technology parameters per-microgrid</h3>
};
window.disableInputs();
{% endif %}

// - Load Profile Validation
// Observe the circuit builder's shared CsvLoadParser (window.csvLoadParser) so the
// additional-loadshape meter dropdown stays in sync with the meters found in the
// uploaded load CSV. NOTE(review): the previous `new CsvLoadParser()` local instance
// was never referenced anywhere in this script, so it has been removed.
class LoadProfileValidator {
	handleParsedCsv() {
		const loads = window.csvLoadParser.getLoads();
		if (loads) {
			const meterNames = loads.map(load => load.getProperty('name'));
			populateMeterDropdown(meterNames);
		}
	}
}
const loadProfileValidator = new LoadProfileValidator();
window.csvLoadParser.addObserver(loadProfileValidator);

// Refill the additional-loadshape meter <select> with one option per meter name,
// keeping the placeholder ("Select a meter") option in place.
function populateMeterDropdown(meterNames) {
	const dropdown = document.getElementById('additionalLoadshapeMeterSelect');
	if (!dropdown) {
		console.error('additionalLoadshapeMeterSelect not found');
		return;
	}
	// Truncate to just the placeholder, then append the fresh meter options.
	dropdown.options.length = 1;
	for (const meterName of meterNames) {
		const opt = document.createElement('option');
		opt.value = meterName;
		opt.textContent = meterName;
		dropdown.appendChild(opt);
	}
}

// - Override submitEverything to validate load-growth inputs before submission.
const originalSubmitEverything = submitEverything;
submitEverything = async function() {
	// Validate LOAD_GROWTH_SPECIFIC: must be valid JSON and reference only known meters.
	const growthSpecificTextarea = document.querySelector('[name="LOAD_GROWTH_SPECIFIC"]');
	if (growthSpecificTextarea && growthSpecificTextarea.value.trim()) {
		// Keep the try narrow: previously a null return from getLoads() made
		// loads.map() throw inside this try, which surfaced as the misleading
		// "Invalid JSON" alert even when the JSON was fine.
		let growthSpecific;
		try {
			growthSpecific = JSON.parse(growthSpecificTextarea.value);
		} catch (e) {
			alert('Invalid JSON in Specific Load Growth. Please check the format.');
			return;
		}
		// getLoads() can return null before a CSV has been parsed (the observer
		// above guards for this too), so only cross-check names when loads exist.
		const loads = window.csvLoadParser.getLoads();
		if (loads) {
			const validMeterNames = new Set(loads.map(load => load.getProperty('name')));
			for (const meter in growthSpecific) {
				if (!validMeterNames.has(meter)) {
					alert(`Invalid meter name "${meter}" in Specific Load Growth. Please select from available loads.`);
					return;
				}
			}
		}
	}

	// Call original submitEverything
	await originalSubmitEverything();
};
</script>
</body>
</html>
42 changes: 33 additions & 9 deletions tests_backend_full.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,10 @@ def test_1mg():
'DESCRIPTION': '',
'singlePhaseRelayCost': 300.0,
'threePhaseRelayCost': 20000.0,
'LOAD_GROWTH_PERCENT': 0.0,
'LOAD_GROWTH_SPECIFIC': {},
'ADDITIONAL_LOADSHAPE_CSV': None,
'ADDITIONAL_LOADSHAPE_METER': '',
}
# Run model.
microgridup.main(data, invalidate_cache=False, open_results=True)
Expand Down Expand Up @@ -223,6 +227,10 @@ def test_2mg():
'DESCRIPTION': '',
'singlePhaseRelayCost': 300.0,
'threePhaseRelayCost': 20000.0,
'LOAD_GROWTH_PERCENT': 0.0,
'LOAD_GROWTH_SPECIFIC': {},
'ADDITIONAL_LOADSHAPE_CSV': None,
'ADDITIONAL_LOADSHAPE_METER': '',
}
# Run model.
microgridup.main(data, invalidate_cache=False, open_results=True)
Expand Down Expand Up @@ -347,7 +355,11 @@ def test_3mg():
],
'DESCRIPTION': '',
'singlePhaseRelayCost': 300.0,
'threePhaseRelayCost': 20000.0
'threePhaseRelayCost': 20000.0,
'LOAD_GROWTH_PERCENT': 0.0,
'LOAD_GROWTH_SPECIFIC': {},
'ADDITIONAL_LOADSHAPE_CSV': None,
'ADDITIONAL_LOADSHAPE_METER': ''
}
# Run model.
microgridup.main(data, invalidate_cache=False, open_results=True)
Expand Down Expand Up @@ -492,7 +504,11 @@ def test_4mg():
],
'DESCRIPTION': '',
'singlePhaseRelayCost': 300.0,
'threePhaseRelayCost': 20000.0
'threePhaseRelayCost': 20000.0,
'LOAD_GROWTH_PERCENT': 0.0,
'LOAD_GROWTH_SPECIFIC': {},
'ADDITIONAL_LOADSHAPE_CSV': None,
'ADDITIONAL_LOADSHAPE_METER': ''
}
# Run model.
microgridup.main(data, invalidate_cache=False, open_results=True)
Expand Down Expand Up @@ -586,7 +602,11 @@ def test_auto3mg():
],
'DESCRIPTION': '',
'singlePhaseRelayCost': 300.0,
'threePhaseRelayCost': 20000.0
'threePhaseRelayCost': 20000.0,
'LOAD_GROWTH_PERCENT': 0.0,
'LOAD_GROWTH_SPECIFIC': {},
'ADDITIONAL_LOADSHAPE_CSV': None,
'ADDITIONAL_LOADSHAPE_METER': ''
}
# Run model.
microgridup.main(data, invalidate_cache=False, open_results=True)
Expand Down Expand Up @@ -1008,17 +1028,21 @@ def test_mackelroy():
],
"DESCRIPTION": "",
"singlePhaseRelayCost": 300.0,
"threePhaseRelayCost": 20000.0
"threePhaseRelayCost": 20000.0,
"LOAD_GROWTH_PERCENT": 0.0,
"LOAD_GROWTH_SPECIFIC": {},
"ADDITIONAL_LOADSHAPE_CSV": None,
"ADDITIONAL_LOADSHAPE_METER": ""
}
# Run model.
microgridup.main(data, invalidate_cache=False, open_results=True)
if os.path.isfile(f'{microgridup.PROJ_DIR}/{data["MODEL_DIR"]}/0crashed.txt'):
sys.exit(1)

if __name__ == '__main__':
    # NOTE(review): the full suite was commented out, leaving only test_mackelroy()
    # running — likely a debug leftover. Re-enable all backend tests.
    test_1mg()
    test_2mg()
    test_3mg()
    test_4mg()
    test_auto3mg()
    test_mackelroy()