diff --git a/calliope_app/api/admin.py b/calliope_app/api/admin.py
index da0b5c06..276c9cec 100644
--- a/calliope_app/api/admin.py
+++ b/calliope_app/api/admin.py
@@ -9,11 +9,13 @@
Scenario_Loc_Tech, Scenario_Param, Job_Meta, Carrier
from api.models.outputs import Run
from api.models.engage import User_Profile, ComputeEnvironment
+from api.forms import ComputeEnvironmentModelForm
class ComputeEnvironmentAdmin(admin.ModelAdmin):
+ form = ComputeEnvironmentModelForm
filter_horizontal = ("users",)
- list_display = ['id', 'name', 'full_name', 'is_default', 'solver', 'ncpu', 'memory', 'type', '_users']
+ list_display = ['id', 'name', 'full_name', 'is_default', 'solvers', 'ncpu', 'memory', 'type', '_users']
@staticmethod
def _users(instance):
diff --git a/calliope_app/api/calliope_utils.py b/calliope_app/api/calliope_utils.py
index 1ab3e3c3..911742fa 100644
--- a/calliope_app/api/calliope_utils.py
+++ b/calliope_app/api/calliope_utils.py
@@ -3,19 +3,20 @@
interfacing with Calliope.
"""
+import calendar
+import copy
+import json
+import logging
import os
-import yaml
import shutil
-from calliope import Model as CalliopeModel
+
+import calliope
import pandas as pd
-import json
-import copy
-import calendar
+import yaml
+from calliope import Model as CalliopeModel
from api.models.configuration import Scenario_Param, Scenario_Loc_Tech, \
- Location, Tech_Param, Loc_Tech_Param, Loc_Tech, Scenario, Carrier
-from api.models.outputs import Run
-import logging
+ Location, Tech_Param, Loc_Tech_Param, Loc_Tech, Scenario
logger = logging.getLogger(__name__)
@@ -36,7 +37,7 @@ def get_model_yaml_set(run, scenario_id, year):
# NOTE: deprecated run parameter in the database
if unique_param == "run.objective_options":
continue
-
+
if unique_param not in unique_params:
# If parameter hasn't been set, add to Return List
unique_params.append(unique_param)
@@ -164,7 +165,7 @@ def get_loc_techs_yaml_set(scenario_id, year):
value = float(param.value) / 100
else:
value = param.value
-
+
param_list = [parent_type, location, 'techs',
param.loc_tech.technology.calliope_name]+\
unique_param.split('.')
@@ -173,7 +174,7 @@ def get_loc_techs_yaml_set(scenario_id, year):
def get_carriers_yaml_set(scenario_id):
model = Scenario.objects.get(id=scenario_id).model
-
+
carriers_yaml_set = {}
for carrier in model.carriers.all():
carriers_yaml_set[carrier.name] = {'rate':carrier.rate_unit,'quantity':carrier.quantity_unit}
@@ -229,6 +230,11 @@ def run_basic(model_path, logger):
model = CalliopeModel(config=model_path)
logger.info(model.info())
logger.info(model._model_data.coords.get("techs_non_transmission", []))
+
+    # NOTE: with log_to_console=True, the model run may hang if an error occurs.
+ if model.run_config['solver'] == 'appsi_highs':
+ model.run_config['solver_options'] = {'log_to_console': False}
+
model.run()
_write_outputs(model, model_path)
return model.results.termination_condition
@@ -241,6 +247,11 @@ def run_clustered(model_path, idx, logger):
_set_subset_time(model_path)
_set_capacities(model_path)
model = CalliopeModel(config=model_path)
+
+    # NOTE: with log_to_console=True, the model run may hang if an error occurs.
+ if model.run_config['solver'] == 'appsi_highs':
+ model.run_config['solver_options'] = {'log_to_console': False}
+
model.run()
_write_outputs(model, model_path)
if model.results.termination_condition != 'optimal':
@@ -488,155 +499,171 @@ def apply_gradient(old_inputs,old_results,new_inputs,old_year,new_year,logger):
new_loctechs = yaml.safe_load(open(new_inputs+'/locations.yaml','r'))
new_model = yaml.safe_load(open(new_inputs+'/model.yaml','r'))
- built_tech_names = []
+ built_tech_names = {}
built_techs = {}
built_loc_techs = {}
- for l in old_model['locations']:
- if 'techs' in old_model['locations'][l]:
- for t in old_model['locations'][l]['techs']:
- old_tech = old_model['techs'][t]
- new_tech = new_techs['techs'][t]
- new_loc_tech = new_loctechs['locations'][l]['techs'][t]
- loc_tech = old_model['locations'][l]['techs'][t]
- if ('energy_cap_max' in loc_tech.get('constraints',{}) or 'storage_cap_max' in loc_tech.get('constraints',{})) or\
- ('energy_cap_max' in old_tech.get('constraints',{}) or 'storage_cap_max' in old_tech.get('constraints',{})):
- if loc_tech.get('results',{'energy_cap_equals':0}).get('energy_cap_equals',0) != 0 or\
- loc_tech.get('results',{'storage_cap_equals':0}).get('storage_cap_equals',0) != 0:
- loc_tech_b = copy.deepcopy(loc_tech)
- built_tech_names.append(t)
-
- if 'constraints' in loc_tech_b:
- [loc_tech_b['constraints'].pop(c) for c in ['energy_cap_max', 'storage_cap_max'] if c in loc_tech_b['constraints']]
- else:
- loc_tech_b['constraints'] = {}
- if 'energy_cap_equals' in loc_tech['results']:
- loc_tech_b['constraints']['energy_cap_equals'] = loc_tech['results']['energy_cap_equals']
- if 'storage_cap_equals' in loc_tech['results']:
- loc_tech_b['constraints']['storage_cap_equals'] = loc_tech['results']['storage_cap_equals']
- cost_classes = [c for c in loc_tech_b.keys() if 'costs.' in c]
- for cost in cost_classes:
- [loc_tech_b[cost].pop(c) for c in ['energy_cap','interest_rate','storage_cap'] if c in loc_tech_b[cost]]
- loc_tech_b.pop('results')
-
- if new_loc_tech and 'constraints' in new_loc_tech:
- new_energy_cap_min = new_loc_tech['constraints'].get('energy_cap_min',new_tech.get('constraints',{}).get('energy_cap_min',0))
- new_energy_cap_max = new_loc_tech['constraints'].get('energy_cap_max',new_tech.get('constraints',{}).get('energy_cap_max',0))
- new_storage_cap_min = new_loc_tech['constraints'].get('storage_cap_min',new_tech.get('constraints',{}).get('storage_cap_min',0))
- new_storage_cap_max = new_loc_tech['constraints'].get('storage_cap_max',new_tech.get('constraints',{}).get('storage_cap_max',0))
- else:
- new_energy_cap_min = new_tech.get('constraints',{}).get('energy_cap_min',0)
- new_energy_cap_max = new_tech.get('constraints',{}).get('energy_cap_max',0)
- new_storage_cap_min = new_tech.get('constraints',{}).get('storage_cap_min',0)
- new_storage_cap_max = new_tech.get('constraints',{}).get('storage_cap_max',0)
+ for loc_type in ['locations','links']:
+ for l in old_model[loc_type]:
+ if 'techs' in old_model[loc_type][l]:
+ for t in old_model[loc_type][l]['techs']:
+ old_tech = old_model['techs'][t]
+ if t not in new_techs['techs']:
+ continue
+ new_tech = new_techs['techs'][t]
+ new_loc_tech = new_loctechs[loc_type][l]['techs'][t]
+ loc_tech = old_model[loc_type][l]['techs'][t]
+ if ('energy_cap_max' in loc_tech.get('constraints',{}) or 'storage_cap_max' in loc_tech.get('constraints',{})) or\
+ ('energy_cap_max' in old_tech.get('constraints',{}) or 'storage_cap_max' in old_tech.get('constraints',{})):
+ if loc_tech.get('results',{'energy_cap_equals':0}).get('energy_cap_equals',0) != 0 or\
+ loc_tech.get('results',{'storage_cap_equals':0}).get('storage_cap_equals',0) != 0:
+ loc_tech_b = copy.deepcopy(loc_tech)
+
+ # Record built techs and the total systemwide capacity of those techs to use with energy_cap_max_systemwide
+ if t in built_tech_names:
+ built_tech_names[t] += loc_tech.get('results',{'energy_cap_equals':0}).get('energy_cap_equals',0)
+ else:
+ built_tech_names[t] = loc_tech.get('results',{'energy_cap_equals':0}).get('energy_cap_equals',0)
- if new_loc_tech == None:
+ if 'constraints' in loc_tech_b:
+ [loc_tech_b['constraints'].pop(c) for c in ['energy_cap_max', 'storage_cap_max'] if c in loc_tech_b['constraints']]
+ else:
+ loc_tech_b['constraints'] = {}
+ if 'energy_cap_equals' in loc_tech['results']:
+ loc_tech_b['constraints']['energy_cap_equals'] = loc_tech['results']['energy_cap_equals']
+ if 'storage_cap_equals' in loc_tech['results']:
+ loc_tech_b['constraints']['storage_cap_equals'] = loc_tech['results']['storage_cap_equals']
+ if 'energy_cap_per_storage_cap_equals' in loc_tech_b['constraints']:
+ loc_tech_b['constraints'].pop('energy_cap_per_storage_cap_equals')
+ cost_classes = [c for c in loc_tech_b.keys() if 'costs.' in c]
+ for cost in cost_classes:
+ [loc_tech_b[cost].pop(c) for c in ['energy_cap','interest_rate','storage_cap'] if c in loc_tech_b[cost]]
+ loc_tech_b.pop('results')
+
+ if new_loc_tech and 'constraints' in new_loc_tech:
+ new_energy_cap_min = new_loc_tech['constraints'].get('energy_cap_min',new_tech.get('constraints',{}).get('energy_cap_min',0))
+ new_energy_cap_max = new_loc_tech['constraints'].get('energy_cap_max',new_tech.get('constraints',{}).get('energy_cap_max',0))
+ new_storage_cap_min = new_loc_tech['constraints'].get('storage_cap_min',new_tech.get('constraints',{}).get('storage_cap_min',0))
+ new_storage_cap_max = new_loc_tech['constraints'].get('storage_cap_max',new_tech.get('constraints',{}).get('storage_cap_max',0))
+ else:
+ new_energy_cap_min = new_tech.get('constraints',{}).get('energy_cap_min',0)
+ new_energy_cap_max = new_tech.get('constraints',{}).get('energy_cap_max',0)
+ new_storage_cap_min = new_tech.get('constraints',{}).get('storage_cap_min',0)
+ new_storage_cap_max = new_tech.get('constraints',{}).get('storage_cap_max',0)
+
+ if new_loc_tech == None:
new_loc_tech = {}
- if 'constraints' not in new_loc_tech:
+ if 'constraints' not in new_loc_tech:
new_loc_tech['constraints'] = {}
- if new_energy_cap_min > 0 and new_energy_cap_min-loc_tech['results']['energy_cap_equals'] > 0:
- new_loc_tech['constraints']['energy_cap_min'] = new_energy_cap_min-loc_tech['results']['energy_cap_equals']
- if new_loc_tech['constraints']['energy_cap_min'] < 0:
- new_loc_tech['constraints']['energy_cap_min'] = 0
- if new_energy_cap_max != 'inf' and new_energy_cap_max > 0:
- new_loc_tech['constraints']['energy_cap_max'] = new_energy_cap_max-loc_tech['results']['energy_cap_equals']
- if new_loc_tech['constraints']['energy_cap_max'] < 0:
- new_loc_tech['constraints']['energy_cap_max'] = 0
- if new_storage_cap_min > 0 and new_storage_cap_min-loc_tech['results']['storage_cap_equals'] > 0:
- new_loc_tech['constraints']['storage_cap_min'] = new_storage_cap_min-loc_tech['results']['storage_cap_equals']
- if new_loc_tech['constraints']['storage_cap_min'] < 0:
- new_loc_tech['constraints']['storage_cap_min'] = 0
- if new_storage_cap_max != 'inf' and new_storage_cap_max > 0:
- new_loc_tech['constraints']['storage_cap_max'] = new_storage_cap_max-loc_tech['results']['storage_cap_equals']
- if new_loc_tech['constraints']['storage_cap_max'] < 0:
- new_loc_tech['constraints']['storage_cap_max'] = 0
-
- new_loctechs['locations'][l]['techs'][t] = new_loc_tech
- for x in loc_tech_b:
- for y in loc_tech_b[x].keys():
- # Copy over timeseries files for old techs, updating year to match new year
- if 'file=' in str(loc_tech_b[x][y]):
- filename=loc_tech_b[x][y].replace('file=','').replace('.csv:value','')
- ts_df = pd.read_csv(old_inputs+'/'+filename+'.csv')
- ts_df['Unnamed: 0'] = pd.to_datetime(ts_df['Unnamed: 0'])
- freq = pd.infer_freq(ts_df['Unnamed: 0'])
- if not calendar.isleap(new_year):
- feb_29_mask = (ts_df['Unnamed: 0'].dt.month == 2) & (ts_df['Unnamed: 0'].dt.day == 29)
- ts_df = ts_df[~feb_29_mask]
- ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
- ts_df.drop(columns=['Unnamed: 0'], inplace=True)
- elif not calendar.isleap(old_year):
- ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
- ts_df.drop(columns=['Unnamed: 0'], inplace=True)
- idx = pd.date_range(ts_df.index.min(),ts_df.index.max(),freq=freq)
- ts_df = ts_df.reindex(idx, fill_value=0)
-
- # Leap Year Handling (Fill w/ Feb 28th)
- feb_28_mask = (ts_df.index.month == 2) & (ts_df.index.day == 28)
- feb_29_mask = (ts_df.index.month == 2) & (ts_df.index.day == 29)
- feb_28 = ts_df.loc[feb_28_mask, 'value'].values
- feb_29 = ts_df.loc[feb_29_mask, 'value'].values
- if ((len(feb_29) > 0) & (len(feb_28) > 0)):
- ts_df.loc[feb_29_mask, 'value'] = feb_28
- else:
- ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
- ts_df.drop(columns=['Unnamed: 0'], inplace=True)
- ts_df.index.name = None
- ts_df.to_csv(os.path.join(new_inputs,filename+'-'+str(old_year)+'.csv'),index=True)
- loc_tech_b[x][y] = 'file='+filename+'-'+str(old_year)+'.csv:value'
-
- if l not in built_loc_techs:
- built_loc_techs[l] = {}
- built_loc_techs[l][t+'_'+str(old_year)] = loc_tech_b
-
- new_loctechs['locations'][l]['techs'][t+'_'+str(old_year)] = loc_tech_b
- for t in built_tech_names:
+ if new_energy_cap_min > 0 and new_energy_cap_min-loc_tech['results']['energy_cap_equals'] > 0:
+ new_loc_tech['constraints']['energy_cap_min'] = new_energy_cap_min-loc_tech['results']['energy_cap_equals']
+ if new_loc_tech['constraints']['energy_cap_min'] < 0:
+ new_loc_tech['constraints']['energy_cap_min'] = 0
+ if new_energy_cap_max != 'inf' and new_energy_cap_max > 0:
+ new_loc_tech['constraints']['energy_cap_max'] = new_energy_cap_max-loc_tech['results']['energy_cap_equals']
+ if new_loc_tech['constraints']['energy_cap_max'] < 0:
+ new_loc_tech['constraints']['energy_cap_max'] = 0
+ if new_storage_cap_min > 0 and new_storage_cap_min-loc_tech['results']['storage_cap_equals'] > 0:
+ new_loc_tech['constraints']['storage_cap_min'] = new_storage_cap_min-loc_tech['results']['storage_cap_equals']
+ if new_loc_tech['constraints']['storage_cap_min'] < 0:
+ new_loc_tech['constraints']['storage_cap_min'] = 0
+ if new_storage_cap_max != 'inf' and new_storage_cap_max > 0:
+ new_loc_tech['constraints']['storage_cap_max'] = new_storage_cap_max-loc_tech['results']['storage_cap_equals']
+ if new_loc_tech['constraints']['storage_cap_max'] < 0:
+ new_loc_tech['constraints']['storage_cap_max'] = 0
+
+ new_loctechs[loc_type][l]['techs'][t] = new_loc_tech
+ for x in loc_tech_b:
+ for y in loc_tech_b[x].keys():
+ try:
+ # Copy over timeseries files for old techs, updating year to match new year
+                            if 'file=' in str(loc_tech_b[x][y]):
+ filename=loc_tech_b[x][y].replace('file=','').replace('.csv:value','')
+ ts_df = pd.read_csv(old_inputs+'/'+filename+'.csv')
+ ts_df['Unnamed: 0'] = pd.to_datetime(ts_df['Unnamed: 0'])
+ freq = pd.infer_freq(ts_df['Unnamed: 0'])
+ if not calendar.isleap(new_year):
+                                    feb_29_mask = (ts_df['Unnamed: 0'].dt.month == 2) & (ts_df['Unnamed: 0'].dt.day == 29)
+ ts_df = ts_df[~feb_29_mask]
+ ts_df['Unnamed: 0'] = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
+ elif not calendar.isleap(old_year):
+ ts_df['Unnamed: 0'] = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
+ ts_df.index = ts_df['Unnamed: 0']
+
+ # Leap Year Handling (Fill w/ Feb 28th)
+ feb_28_mask = (ts_df.index.month == 2) & (ts_df.index.day == 28)
+ feb_29_mask = (ts_df.index.month == 2) & (ts_df.index.day == 29)
+ feb_28 = ts_df.loc[feb_28_mask, 'value'].values
+ feb_29 = ts_df.loc[feb_29_mask, 'value'].values
+ if ((len(feb_29) > 0) & (len(feb_28) > 0)):
+ ts_df.loc[feb_29_mask, 'value'] = feb_28
+ ts_df['Unnamed: 0'] = ts_df.index
+                                ts_df.to_csv(os.path.join(new_inputs,filename+'-'+str(old_year)+'.csv'),index=False)
+ loc_tech_b[x][y] = 'file='+filename+'-'+str(old_year)+'.csv:value'
+ except TypeError:
+ continue
+
+ if l not in built_loc_techs:
+ built_loc_techs[l] = {}
+ built_loc_techs[l][t+'_'+str(old_year)] = loc_tech_b
+
+ new_loctechs[loc_type][l]['techs'][t+'_'+str(old_year)] = loc_tech_b
+
+ for t in built_tech_names.keys():
tech = old_model['techs'][t]
-
tech_b = copy.deepcopy(tech)
+
+ # Handle systemwide energy cap gradient
+ if 'constraints' in new_techs['techs'][t]:
+ if 'energy_cap_max_systemwide' in new_techs['techs'][t]['constraints']:
+ new_techs['techs'][t]['constraints']['energy_cap_max_systemwide'] = max([new_techs['techs'][t]['constraints']['energy_cap_max_systemwide']-built_tech_names[t],0])
+ if 'energy_cap_equals_systemwide' in new_techs['techs'][t]['constraints']:
+ new_techs['techs'][t]['constraints']['energy_cap_max_systemwide'] = max([new_techs['techs'][t]['constraints']['energy_cap_equals_systemwide']-built_tech_names[t],0])
+
if 'constraints' in tech_b:
- [tech_b['constraints'].pop(c) for c in ['energy_cap_max', 'storage_cap_max'] if c in tech_b['constraints']]
+ [tech_b['constraints'].pop(c) for c in ['energy_cap_max', 'storage_cap_max','energy_cap_per_storage_cap_equals'] if c in tech_b['constraints']]
cost_classes = [c for c in tech_b.keys() if 'costs.' in c]
for cost in cost_classes:
[tech_b[cost].pop(c) for c in ['energy_cap','interest_rate','storage_cap'] if c in tech_b[cost]]
if len(tech_b[cost].keys()) == 0:
tech_b.pop(cost)
-
+
tech_b['essentials']['name'] += ' '+str(old_year)
for x in tech_b:
- for y in tech_b[x].keys():
- # Copy over timeseries files for old techs, updating year to match new year
- if 'file=' in str(tech_b[x][y]):
- filename=tech_b[x][y].replace('file=','').replace('.csv:value','')
- ts_df = pd.read_csv(old_inputs+'/'+filename+'.csv')
- ts_df['Unnamed: 0'] = pd.to_datetime(ts_df['Unnamed: 0'])
- freq = pd.infer_freq(ts_df['Unnamed: 0'])
- if not calendar.isleap(new_year):
- feb_29_mask = (ts_df['Unnamed: 0'].dt.month == 2) & (ts_df['Unnamed: 0'].dt.day == 29)
- ts_df = ts_df[~feb_29_mask]
- ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
- ts_df.drop(columns=['Unnamed: 0'], inplace=True)
- elif not calendar.isleap(old_year):
- ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
- ts_df.drop(columns=['Unnamed: 0'], inplace=True)
- idx = pd.date_range(ts_df.index.min(),ts_df.index.max(),freq=freq)
- ts_df = ts_df.reindex(idx, fill_value=0)
-
- # Leap Year Handling (Fill w/ Feb 28th)
- feb_28_mask = (ts_df.index.month == 2) & (ts_df.index.day == 28)
- feb_29_mask = (ts_df.index.month == 2) & (ts_df.index.day == 29)
- feb_28 = ts_df.loc[feb_28_mask, 'value'].values
- feb_29 = ts_df.loc[feb_29_mask, 'value'].values
- if ((len(feb_29) > 0) & (len(feb_28) > 0)):
- ts_df.loc[feb_29_mask, 'value'] = feb_28
- else:
- ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
- ts_df.drop(columns=['Unnamed: 0'], inplace=True)
- ts_df.index.name = None
- ts_df.to_csv(os.path.join(new_inputs,filename+'-'+str(old_year)+'.csv'),index=True)
- tech_b[x][y] = 'file='+filename+'-'+str(old_year)+'.csv:value'
+            # NOTE: removed redundant duplicate outer loop over tech_b[x]
+                for y in tech_b[x].keys():
+ # Copy over timeseries files for old techs, updating year to match new year
+ if 'file=' in str(tech_b[x][y]):
+ filename=tech_b[x][y].replace('file=','').replace('.csv:value','')
+ ts_df = pd.read_csv(old_inputs+'/'+filename+'.csv')
+ ts_df['Unnamed: 0'] = pd.to_datetime(ts_df['Unnamed: 0'])
+ freq = pd.infer_freq(ts_df['Unnamed: 0'])
+ if not calendar.isleap(new_year):
+ feb_29_mask = (ts_df['Unnamed: 0'].dt.month == 2) & (ts_df['Unnamed: 0'].dt.day == 29)
+ ts_df = ts_df[~feb_29_mask]
+ ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
+ ts_df.drop(columns=['Unnamed: 0'], inplace=True)
+ elif not calendar.isleap(old_year):
+ ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
+ ts_df.drop(columns=['Unnamed: 0'], inplace=True)
+ idx = pd.date_range(ts_df.index.min(),ts_df.index.max(),freq=freq)
+ ts_df = ts_df.reindex(idx, fill_value=0)
+
+ # Leap Year Handling (Fill w/ Feb 28th)
+ feb_28_mask = (ts_df.index.month == 2) & (ts_df.index.day == 28)
+ feb_29_mask = (ts_df.index.month == 2) & (ts_df.index.day == 29)
+ feb_28 = ts_df.loc[feb_28_mask, 'value'].values
+ feb_29 = ts_df.loc[feb_29_mask, 'value'].values
+ if ((len(feb_29) > 0) & (len(feb_28) > 0)):
+ ts_df.loc[feb_29_mask, 'value'] = feb_28
+ else:
+ ts_df.index = ts_df['Unnamed: 0'].apply(lambda x: x.replace(year=new_year))
+ ts_df.drop(columns=['Unnamed: 0'], inplace=True)
+ ts_df.index.name = None
+ ts_df.to_csv(os.path.join(new_inputs,filename+'-'+str(old_year)+'.csv'),index=True)
+ tech_b[x][y] = 'file='+filename+'-'+str(old_year)+'.csv:value'
built_techs[t+'_'+str(old_year)] = tech_b
new_techs['techs'][t+'_'+str(old_year)] = tech_b
@@ -664,4 +691,4 @@ def apply_gradient(old_inputs,old_results,new_inputs,old_year,new_year,logger):
yaml.dump(new_loctechs,outfile,default_flow_style=False)
with open(new_inputs+'/model.yaml', 'w') as outfile:
- yaml.dump(new_model,outfile,default_flow_style=False)
\ No newline at end of file
+ yaml.dump(new_model,outfile,default_flow_style=False)
diff --git a/calliope_app/api/engage.py b/calliope_app/api/engage.py
index 086d8ed9..3ee87c8d 100644
--- a/calliope_app/api/engage.py
+++ b/calliope_app/api/engage.py
@@ -6,6 +6,28 @@
from django.core.mail.message import sanitize_address
+ENGAGE_SOLVERS = [
+ {
+ "name": "appsi_highs",
+ "pretty_name": "HiGHS",
+ "order": 1,
+ "is_active": True
+ },
+ {
+ "name": "cbc",
+ "pretty_name": "CBC",
+ "order": 2,
+ "is_active": True
+ },
+ {
+ "name": "amplxpress",
+ "pretty_name": "Xpress",
+ "order": 3,
+ "is_active": False
+ }
+]
+
+
def aws_ses_configured():
"""
Check the configuration of AWS SES settings
diff --git a/calliope_app/api/forms.py b/calliope_app/api/forms.py
new file mode 100644
index 00000000..d300bf2b
--- /dev/null
+++ b/calliope_app/api/forms.py
@@ -0,0 +1,15 @@
+
+from django import forms
+
+from client.widgets import JSONEditorWidget
+
+from api.models.engage import ComputeEnvironment
+
+
+class ComputeEnvironmentModelForm(forms.ModelForm):
+ class Meta:
+ model = ComputeEnvironment
+ fields = '__all__'
+ widgets = {
+ 'solvers': JSONEditorWidget()
+ }
diff --git a/calliope_app/api/migrations/0068_remove_computeenvironment_solver_and_more.py b/calliope_app/api/migrations/0068_remove_computeenvironment_solver_and_more.py
new file mode 100644
index 00000000..4d548fd6
--- /dev/null
+++ b/calliope_app/api/migrations/0068_remove_computeenvironment_solver_and_more.py
@@ -0,0 +1,23 @@
+# Generated by Django 4.2.15 on 2024-09-17 03:26
+
+import api.models.engage
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0067_auto_20240613_1706'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='computeenvironment',
+ name='solver',
+ ),
+ migrations.AddField(
+ model_name='computeenvironment',
+ name='solvers',
+ field=models.JSONField(default=api.models.engage.default_solvers),
+ ),
+ ]
diff --git a/calliope_app/api/models/engage.py b/calliope_app/api/models/engage.py
index e0b19b64..822998a0 100644
--- a/calliope_app/api/models/engage.py
+++ b/calliope_app/api/models/engage.py
@@ -9,6 +9,8 @@
from django.urls import reverse
from django.utils.html import mark_safe
+from api.engage import ENGAGE_SOLVERS
+
logger = logging.getLogger(__name__)
@@ -102,6 +104,10 @@ def activate(cls, activation_uuid):
return True
+def default_solvers():
+ return ENGAGE_SOLVERS
+
+
class ComputeEnvironment(models.Model):
ENV_TYPES = [
@@ -114,11 +120,11 @@ class ComputeEnvironment(models.Model):
full_name = models.CharField(max_length=120)
is_default = models.BooleanField(default=False)
type = models.CharField(max_length=60, choices=ENV_TYPES)
- solver = models.CharField(max_length=60, null=True, blank=True)
ncpu = models.PositiveSmallIntegerField(null=True, blank=True)
memory = models.PositiveSmallIntegerField(null=True, blank=True)
cmd = models.TextField(blank=True, null=True)
users = models.ManyToManyField(User, related_name="compute_environments", blank=True)
+ solvers = models.JSONField(default=default_solvers)
class Meta:
db_table = "compute_environments"
@@ -141,4 +147,4 @@ class Meta:
verbose_name_plural = "[Admin] Request Rate Limits"
def __str__(self):
- return f"{self.year}, {self.month}, {self.total}"
\ No newline at end of file
+ return f"{self.year}, {self.month}, {self.total}"
diff --git a/calliope_app/api/urls.py b/calliope_app/api/urls.py
index e8dfb753..8431adb9 100644
--- a/calliope_app/api/urls.py
+++ b/calliope_app/api/urls.py
@@ -133,6 +133,8 @@
path('upload_outputs/',
outputs_views.upload_outputs,
name='upload_outputs'),
+ path('solvers/', outputs_views.solvers,
+ name="solvers"),
# Bulk Data
path('upload_locations/',
diff --git a/calliope_app/api/views/outputs.py b/calliope_app/api/views/outputs.py
index 8515cc73..c6dbd9df 100644
--- a/calliope_app/api/views/outputs.py
+++ b/calliope_app/api/views/outputs.py
@@ -3,16 +3,13 @@
import io
import logging
import os
-import shutil
import zipfile
-import sys
from re import match
from datetime import datetime, timedelta
from urllib.parse import urljoin
import requests
import pandas as pd
-import pint
from celery import current_app,chain
from django.views.decorators.csrf import csrf_protect
@@ -30,19 +27,45 @@
from api.models.calliope import Abstract_Tech, Abstract_Tech_Param, Parameter, Run_Parameter
from api.models.configuration import (
Model, ParamsManager, User_File, Location, Technology,
- Tech_Param, Loc_Tech, Loc_Tech_Param, Timeseries_Meta, Carrier, Scenario_Param
+ Tech_Param, Loc_Tech, Loc_Tech_Param, Timeseries_Meta, Carrier
)
from api.models.engage import ComputeEnvironment
+from api.engage import ENGAGE_SOLVERS
from api.utils import zip_folder, initialize_units, convert_units, noconv_units
from batch.managers import AWSBatchJobManager
from taskmeta.models import CeleryTask, BatchTask, batch_task_status
-from calliope_app.celery import app
-
logger = logging.getLogger(__name__)
+@csrf_protect
+def solvers(request):
+ env_name = request.GET.get("env_name", None)
+ if not env_name:
+ env_name = "default"
+
+ flag = True
+ try:
+ env = ComputeEnvironment.objects.get(name=env_name)
+ except ComputeEnvironment.DoesNotExist:
+ flag = False
+
+ if (not flag) or (not env.solvers) or (not isinstance(env.solvers, list)):
+ solvers = ENGAGE_SOLVERS
+ else:
+ solvers = env.solvers
+
+ candidates = []
+ for solver in solvers:
+ is_active = solver.get("is_active", "false")
+ if (is_active is True) or (is_active == "true"):
+ candidates.append(solver)
+ payload = sorted(candidates, key=lambda x: x["order"])
+
+ return HttpResponse(json.dumps(payload), content_type="application/json")
+
+
@csrf_protect
def build(request):
"""
@@ -156,13 +179,13 @@ def build(request):
)
inputs_path = inputs_path.lower().replace(" ", "-")
os.makedirs(inputs_path, exist_ok=True)
-
+
run.run_options = []
for id in parameters.keys():
run_parameter= Run_Parameter.objects.get(pk=int(id))
run.run_options.append({'root':run_parameter.root,'name':run_parameter.name,'value':parameters[id]})
-
- # Celery task
+
+ # Celery task
async_result = build_model.apply_async(
kwargs={
"inputs_path": inputs_path,
@@ -300,7 +323,7 @@ def optimize(request):
r.batch_job.status = batch_task_status.FAILED
r.batch_job.save()
r.save()
-
+
if not all_complete:
payload = {
"status": "BLOCKED",
@@ -344,7 +367,7 @@ def optimize(request):
else:
logger.info("Found a subsequent gradient model for year %s but it was not built.",next_run.year)
break
-
+
# Unknown environment, not supported
else:
raise Exception("Failed to submit job, unknown compute environment")
diff --git a/calliope_app/calliope-files/backend/run.py b/calliope_app/calliope-files/backend/run.py
index 0a70e0c1..4ee335d9 100644
--- a/calliope_app/calliope-files/backend/run.py
+++ b/calliope_app/calliope-files/backend/run.py
@@ -152,10 +152,10 @@ def run_plan(
# NOTE: pyomo==6.7.0 opt does not have name attribute
# Disable this for use 'appsi_highs' solver, which does not have 'persistent' in name.
- # if "persistent" in opt.name and persistent is True:
- # results.attrs["objective_function_value"] = opt.get_model_attr("ObjVal")
- # else:
- results.attrs["objective_function_value"] = backend_model.obj()
+ if "appsi_highs" not in solver and "persistent" in opt.name and persistent is True:
+ results.attrs["objective_function_value"] = opt.get_model_attr("ObjVal")
+ else:
+ results.attrs["objective_function_value"] = backend_model.obj()
else:
results = xr.Dataset(attrs={"termination_condition": termination})
diff --git a/calliope_app/client/static/js/add_run.js b/calliope_app/client/static/js/add_run.js
index 10998443..cce1ff72 100644
--- a/calliope_app/client/static/js/add_run.js
+++ b/calliope_app/client/static/js/add_run.js
@@ -1,209 +1,235 @@
-$(document).ready(function () {
-
- add_run_precheck();
-
- $('#master-cancel').removeClass('hide');
- $('#master-save').removeClass('hide');
-
- $('#master-cancel').on('click', function () {
- var model_uuid = $('#header').data('model_uuid');
- window.location = '/' + model_uuid + '/runs/';
- });
-
- $('#master-save').on('click', function () {
- var model_uuid = $('#header').data('model_uuid'),
- scenario_id = $("#scenario").data('scenario_id'),
- start_date = $('#start_date').val(),
- end_date = $('#end_date').val(),
- cluster = $('#cluster').is(":checked"),
- manual = $('#manual').is(":checked"),
- timestep = $('#timestep').val(),
- sd = new Date(start_date),
- ed = new Date(end_date),
- run_env = $('#run-environment option:selected').text(),
- years = $('#years').val(),
- notes = $('#notes').val();
-
- var parameters = {};
- $('#run_parameters .parameter-row').each(function() {
- var paramId = $(this).data('param-id');
- var value = $(this).find('.run-parameter-value').val();
- parameters[paramId] = value;
- });
- console.log(parameters);
-
-
- // fix timezone issues
- sd = new Date(sd.getTime() + sd.getTimezoneOffset() * 60000);
- ed = new Date(ed.getTime() + ed.getTimezoneOffset() * 60000);
-
- var validated = true;
- if (scenario_id == undefined) {
- alert('Must choose a Scenario')
- validated = false;
- } else if (!(sd & ed)) {
- alert('Must select a date range below.');
- validated = false;
- } else if (sd > ed) {
- alert('Start date can not be later then the end date.');
- validated = false;
- } else if (sd.getFullYear() != ed.getFullYear()) {
- alert('Start date and end date must occur within the same year')
- validated = false;
- };
-
- if (validated) {
- $('#master-save').prop('disabled', true);
-
- $.ajax({
- url: '/' + LANGUAGE_CODE + '/api/build/',
- contentType: 'application/json', // Specify that you're sending JSON
- data: {
- 'model_uuid': model_uuid,
- 'scenario_id': scenario_id,
- 'start_date': start_date,
- 'end_date': end_date,
- 'cluster': cluster,
- 'manual': manual,
- 'timestep': timestep,
- 'run_env': run_env,
- 'years': years,
- 'notes': notes,
- 'parameters': JSON.stringify(parameters)
- },
- dataType: 'json',
- success: function (data) {
- if (data['status'] == 'Success') {
- window.location = '/' + model_uuid + '/runs/';
- } else {
- $('#build-error').html(data['message']);
- $('#master-save').prop('disabled', false);
- };
- }
- });
- };
- });
-
- // Automatically deactivate clustering if manual is enabled.
- $('#manual').on('click', function () {
- if ($('#manual').is(":checked")) {
- $('#cluster').prop('checked', false);
- }
- });
-
-});
-
-
-function add_run_precheck() {
- var model_uuid = $('#header').data('model_uuid'),
- scenario_id = $("#scenario").data('scenario_id');
- $.ajax({
- url: '/' + LANGUAGE_CODE + '/component/add_run_precheck/',
- data: {
- 'model_uuid': model_uuid,
- 'scenario_id': scenario_id,
- },
- dataType: 'json',
- success: function (data) {
- $('#add_run_precheck').html(data['html']);
- render_gantt();
- activate_tiles();
- }
- });
-};
-
-function activate_tiles() {
- $('.selection_tile').on('click', function () {
- var start_date = $(this).data('start_date'),
- end_date = $(this).data('end_date');
- $('.selection_tile').removeClass('btn-outline-primary')
- $(this).addClass('btn-outline-primary')
- $('#start_date').val(start_date);
- $('#end_date').val(end_date);
- })
-}
-
-function render_gantt() {
-
- var data = $('#timeseries_gantt').data('timeseries');
-
- var margin = { top: 40, right: 40, bottom: 20, left: 40 },
- width = $('#timeseries_gantt').width() - margin.left - margin.right,
- bar_height = 16
- height = (bar_height + 4) * data.length;
-
- // Prep data
- var parseDate = d3.timeParse("%m/%d/%Y, %H:%M:%S");
- data.forEach(function (d) {
- d.node = d[0]
- d.parameter = d[1]
- d.start_date = parseDate(d[2]);
- d.end_date = parseDate(d[3]);
- });
-
- // X Axis
- var start_date = d3.min(data, function (d) { return d.start_date }),
- end_date = d3.max(data, function (d) { return d.end_date });
- var x = d3.scaleTime()
- .domain([start_date, end_date])
- .range([0, width]);
- var xAxis = d3.axisTop()
- .scale(x);
-
- // Y Axis
- var y = d3.scaleLinear()
- .domain([data.length, 0])
- .range([height, 0]);
-
-
- // Draw
- var svg = d3.select("#timeseries_gantt").append("svg")
- .attr("width", width + margin.left + margin.right)
- .attr("height", height + margin.top + margin.bottom)
- .append("g")
- .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
-
- // Define the div for the tooltip
- var tooltip = d3.select("body").append("div")
- .attr("class", "tooltip")
- .style("background-color", "white")
- .style("border", "solid 3px black")
- .style("padding", "5px")
- .style("opacity", 0);
-
- svg.append("g")
- .attr("class", "x axis")
- .style("font-size", "1.2em")
- .call(xAxis)
- var g = svg.selectAll()
- .data(data).enter().append("g");
-
- g.append("rect")
- .attr("height", bar_height)
- .attr("width", function (d) { return x(end_date) - x(start_date); })
- .attr("x", function (d) { return x(start_date); })
- .attr("y", function (d, i) { return y(i) + (bar_height / 2); })
- .style("fill", "red")
- .style("opacity", "0.2");
-
- g.append("rect")
- .attr("height", bar_height)
- .attr("width", function (d) { return x(d.end_date) - x(d.start_date); })
- .attr("x", function (d) { return x(d.start_date); })
- .attr("y", function (d, i) { return y(i) + (bar_height / 2); })
- .style("fill", "green")
- .on("mouseover", function (d) {
- tooltip.transition()
- .duration(200)
- .style("opacity", 1);
-                tooltip.html("" + d.node + "<br/>" + d.parameter)
- .style("left", (d3.event.pageX - 100) + "px")
- .style("top", (d3.event.pageY - 50) + "px");
- })
- .on("mouseout", function (d) {
- tooltip.transition()
- .duration(500)
- .style("opacity", 0);
- });
-
-};
+$(document).ready(function () {
+
+ add_run_precheck();
+
+ $('#master-cancel').removeClass('hide');
+ $('#master-save').removeClass('hide');
+
+ $('#master-cancel').on('click', function () {
+ var model_uuid = $('#header').data('model_uuid');
+ window.location = '/' + model_uuid + '/runs/';
+ });
+
+ $('#master-save').on('click', function () {
+ var model_uuid = $('#header').data('model_uuid'),
+ scenario_id = $("#scenario").data('scenario_id'),
+ start_date = $('#start_date').val(),
+ end_date = $('#end_date').val(),
+ cluster = $('#cluster').is(":checked"),
+ manual = $('#manual').is(":checked"),
+ timestep = $('#timestep').val(),
+ sd = new Date(start_date),
+ ed = new Date(end_date),
+ run_env = $('#run-environment option:selected').text(),
+ years = $('#years').val(),
+ notes = $('#notes').val();
+
+ var parameters = {};
+ $('#run_parameters .parameter-row').each(function() {
+ var paramId = $(this).data('param-id');
+ var value = $(this).find('.run-parameter-value').val();
+ parameters[paramId] = value;
+ });
+
+ // fix timezone issues
+ sd = new Date(sd.getTime() + sd.getTimezoneOffset() * 60000);
+ ed = new Date(ed.getTime() + ed.getTimezoneOffset() * 60000);
+
+ var validated = true;
+ if (scenario_id == undefined) {
+ alert('Must choose a Scenario')
+ validated = false;
+ } else if (!(sd & ed)) {
+ alert('Must select a date range below.');
+ validated = false;
+ } else if (sd > ed) {
+ alert('Start date can not be later then the end date.');
+ validated = false;
+ } else if (sd.getFullYear() != ed.getFullYear()) {
+ alert('Start date and end date must occur within the same year')
+ validated = false;
+ };
+
+ if (validated) {
+ $('#master-save').prop('disabled', true);
+
+ $.ajax({
+ url: '/' + LANGUAGE_CODE + '/api/build/',
+ contentType: 'application/json', // Specify that you're sending JSON
+ data: {
+ 'model_uuid': model_uuid,
+ 'scenario_id': scenario_id,
+ 'start_date': start_date,
+ 'end_date': end_date,
+ 'cluster': cluster,
+ 'manual': manual,
+ 'timestep': timestep,
+ 'run_env': run_env,
+ 'years': years,
+ 'notes': notes,
+ 'parameters': JSON.stringify(parameters)
+ },
+ dataType: 'json',
+ success: function (data) {
+ if (data['status'] == 'Success') {
+ window.location = '/' + model_uuid + '/runs/';
+ } else {
+ $('#build-error').html(data['message']);
+ $('#master-save').prop('disabled', false);
+ };
+ }
+ });
+ };
+ });
+
+ // Automatically deactivate clustering if manual is enabled.
+ $('#manual').on('click', function () {
+ if ($('#manual').is(":checked")) {
+ $('#cluster').prop('checked', false);
+ }
+ });
+
+ var env_name = $(this).val();
+ set_solvers(env_name);
+
+ $("#run-environment").change(function () {
+ var env_name = $(this).val();
+ set_solvers(env_name);
+ });
+
+});
+
+
+function set_solvers(env_name) {
+ $.ajax({
+ url: '/' + LANGUAGE_CODE + '/api/solvers/',
+ data: {
+ 'env_name': env_name,
+ },
+ success: function(data) {
+ var solvers = $('#run-solvers');
+ solvers.empty();
+ $.each(data, function(index, item) {
+ var key = item.name;
+ var value = item.pretty_name;
+            solvers.append($('<option></option>').attr('value', key).text(value));
+ });
+ }
+ });
+}
+
+
+function add_run_precheck() {
+ var model_uuid = $('#header').data('model_uuid'),
+ scenario_id = $("#scenario").data('scenario_id');
+ $.ajax({
+ url: '/' + LANGUAGE_CODE + '/component/add_run_precheck/',
+ data: {
+ 'model_uuid': model_uuid,
+ 'scenario_id': scenario_id,
+ },
+ dataType: 'json',
+ success: function (data) {
+ $('#add_run_precheck').html(data['html']);
+ render_gantt();
+ activate_tiles();
+ }
+ });
+};
+
+
+function activate_tiles() {
+ $('.selection_tile').on('click', function () {
+ var start_date = $(this).data('start_date'),
+ end_date = $(this).data('end_date');
+ $('.selection_tile').removeClass('btn-outline-primary')
+ $(this).addClass('btn-outline-primary')
+ $('#start_date').val(start_date);
+ $('#end_date').val(end_date);
+ })
+}
+
+function render_gantt() {
+
+ var data = $('#timeseries_gantt').data('timeseries');
+
+ var margin = { top: 40, right: 40, bottom: 20, left: 40 },
+ width = $('#timeseries_gantt').width() - margin.left - margin.right,
+ bar_height = 16
+ height = (bar_height + 4) * data.length;
+
+ // Prep data
+ var parseDate = d3.timeParse("%m/%d/%Y, %H:%M:%S");
+ data.forEach(function (d) {
+ d.node = d[0]
+ d.parameter = d[1]
+ d.start_date = parseDate(d[2]);
+ d.end_date = parseDate(d[3]);
+ });
+
+ // X Axis
+ var start_date = d3.min(data, function (d) { return d.start_date }),
+ end_date = d3.max(data, function (d) { return d.end_date });
+ var x = d3.scaleTime()
+ .domain([start_date, end_date])
+ .range([0, width]);
+ var xAxis = d3.axisTop()
+ .scale(x);
+
+ // Y Axis
+ var y = d3.scaleLinear()
+ .domain([data.length, 0])
+ .range([height, 0]);
+
+
+ // Draw
+ var svg = d3.select("#timeseries_gantt").append("svg")
+ .attr("width", width + margin.left + margin.right)
+ .attr("height", height + margin.top + margin.bottom)
+ .append("g")
+ .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
+
+ // Define the div for the tooltip
+ var tooltip = d3.select("body").append("div")
+ .attr("class", "tooltip")
+ .style("background-color", "white")
+ .style("border", "solid 3px black")
+ .style("padding", "5px")
+ .style("opacity", 0);
+
+ svg.append("g")
+ .attr("class", "x axis")
+ .style("font-size", "1.2em")
+ .call(xAxis)
+ var g = svg.selectAll()
+ .data(data).enter().append("g");
+
+ g.append("rect")
+ .attr("height", bar_height)
+ .attr("width", function (d) { return x(end_date) - x(start_date); })
+ .attr("x", function (d) { return x(start_date); })
+ .attr("y", function (d, i) { return y(i) + (bar_height / 2); })
+ .style("fill", "red")
+ .style("opacity", "0.2");
+
+ g.append("rect")
+ .attr("height", bar_height)
+ .attr("width", function (d) { return x(d.end_date) - x(d.start_date); })
+ .attr("x", function (d) { return x(d.start_date); })
+ .attr("y", function (d, i) { return y(i) + (bar_height / 2); })
+ .style("fill", "green")
+ .on("mouseover", function (d) {
+ tooltip.transition()
+ .duration(200)
+ .style("opacity", 1);
+                tooltip.html("" + d.node + "<br/>" + d.parameter)
+ .style("left", (d3.event.pageX - 100) + "px")
+ .style("top", (d3.event.pageY - 50) + "px");
+ })
+ .on("mouseout", function (d) {
+ tooltip.transition()
+ .duration(500)
+ .style("opacity", 0);
+ });
+
+};
diff --git a/calliope_app/client/static/js/scenarios.js b/calliope_app/client/static/js/scenarios.js
index 2dad7329..d98a166d 100644
--- a/calliope_app/client/static/js/scenarios.js
+++ b/calliope_app/client/static/js/scenarios.js
@@ -786,11 +786,11 @@ function activate_scenario_settings() {
});
$('#settings_weights_import_data').on('click', function() {
- dialogObj["monetary"] = $("#monetary").val();
- dialogObj["co2"] = $("#co2").val();
- dialogObj["ch4"] = $("#ch4").val();
- dialogObj["n2o"] = $("#n2o").val();
- dialogObj["co2e"] = $("#co2e").val();
+ dialogObj["monetary"] = !isNaN(parseFloat($("#monetary").val())) ? parseFloat($("#monetary").val()) : $("#monetary").val();
+ dialogObj["co2"] = !isNaN(parseFloat($("#co2").val())) ? parseFloat($("#co2").val()) : $("#co2").val();
+ dialogObj["ch4"] = !isNaN(parseFloat($("#ch4").val())) ? parseFloat($("#ch4").val()) : $("#ch4").val();
+ dialogObj["n2o"] = !isNaN(parseFloat($("#n2o").val())) ? parseFloat($("#n2o").val()) : $("#n2o").val();
+ dialogObj["co2e"] = !isNaN(parseFloat($("#co2e").val())) ? parseFloat($("#co2e").val()) : $("#co2e").val();
$('textarea[name="edit' + dialogInputId + '"]').text(JSON.stringify(dialogObj, undefined, 2));
$('#scenario_weights_json_form').hide();
diff --git a/calliope_app/client/templates/add_run.html b/calliope_app/client/templates/add_run.html
index 9433d66f..4c670a69 100644
--- a/calliope_app/client/templates/add_run.html
+++ b/calliope_app/client/templates/add_run.html
@@ -18,9 +18,9 @@
background-color: #192733;
border-radius: 10px;
padding: 10px;
- left: 0%;
+ left: 0%;
margin-left: 10px; /* Offset it by 10 pixels to the right */
- transform: translateY(10px);
+ transform: translateY(10px);
}
.hover-text:hover .tooltip-text {
@@ -45,7 +45,7 @@
{% endblock %}
{% block config_runs %}