@@ -45,7 +45,11 @@ def __init__(self, *args, **kwargs):
             **kwargs,
             death_timeout=60 * 5,  # 5min
             python="python3",
+            # submit_command_extra = "-spool"
         )
+        self.submit_command = "./job_submit.sh"
+        self.cancel_command = "./job_rm.sh"
+        self.executable = "/bin/bash"
 
 
 ##
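
Note: the three new attributes override dask-jobqueue's defaults (condor_submit / condor_rm) so that submission and removal go through local wrapper scripts. A minimal sketch of the same idea expressed as class attributes on a Job subclass; the class name is hypothetical, the script paths are the ones introduced in this commit:

    from dask_jobqueue.htcondor import HTCondorJob

    class WrappedHTCondorJob(HTCondorJob):
        # Route submission/removal through local wrapper scripts instead of
        # calling condor_submit / condor_rm directly (hypothetical subclass).
        submit_command = "./job_submit.sh"
        cancel_command = "./job_rm.sh"
        executable = "/bin/bash"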
@@ -81,7 +85,12 @@ def __init__(self, *args, **kwargs):
 sched_port = int(os.environ.get("SCHED_PORT", "42000"))
 dash_port = int(os.environ.get("DASH_PORT", "42001"))
 controller_port = int(os.environ.get("CONTROLLER_PORT", "42002"))
-singularity_wn_image = os.environ.get("SINGULARITY_WN_IMAGE", "/cvmfs/images.dodas.infn.it/registry.hub.docker.com/dodasts/root-in-docker:ubuntu22-kernel-v1")
+singularity_wn_image = os.environ.get("SINGULARITY_WN_IMAGE", "/cvmfs/unpacked.cern.ch/registry.hub.docker.com/dodasts/root-in-docker:ubuntu22-kernel-v1")
+
+user_cores = int(os.environ.get("USER_CORES", 1))
+user_memory = os.environ.get("USER_MEMORY", "2 GiB")
+if user_memory == "":
+    user_memory = "2 GiB"
 
 logger.debug(f"name: {name}")
 logger.debug(f"token: {token}")
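
Note: worker sizing now comes from USER_CORES / USER_MEMORY instead of being hard-coded. Dask accepts human-readable memory strings; a quick sketch (assumed, not part of the commit) showing how such a value can be validated with dask.utils.parse_bytes, the parser referenced by the comment removed further down:

    import os
    from dask.utils import parse_bytes

    user_memory = os.environ.get("USER_MEMORY", "2 GiB") or "2 GiB"
    # parse_bytes("2 GiB") == 2147483648; raises ValueError on malformed input
    assert parse_bytes(user_memory) > 0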
@@ -173,13 +182,16 @@ def _worker_spec(self) -> dict:
     def run(self):
         self.cluster = HTCondorCluster(
             job_cls=MyHTCondorJob,
-            cores=1,
-            memory="2 GiB",  # ref: https://github.com/dask/dask/blob/e4799c0498b5e5877705bb5542d8d01116ee1320/dask/utils.py#L1404
+            cores=user_cores,
+            memory=user_memory,
+            # cores=1,
+            # memory="2 GiB",  # ref: https://github.com/dask/dask/blob/e4799c0498b5e5877705bb5542d8d01116ee1320/dask/utils.py#L1404
             disk="1 GB",
             scheduler_options=scheduler_options_vars,
             job_extra=job_extra_vars,
             # silence_logs="debug",
             local_directory="./scratch",
+            job_script_prologue=['eval "$(conda shell.bash hook)"']
         )
 
         while self.cluster.status != Status.running:
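
Note: job_script_prologue prepends shell commands to the generated worker job script, so the conda hook is evaluated before the worker process starts. A sketch (values assumed; requires a dask-jobqueue version that supports job_script_prologue) of how to render the script locally and check where the prologue line lands:

    from dask_jobqueue import HTCondorCluster

    cluster = HTCondorCluster(
        cores=1,
        memory="2 GiB",
        disk="1 GB",
        job_script_prologue=['eval "$(conda shell.bash hook)"'],
    )
    print(cluster.job_script())  # the prologue appears before the worker command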
@@ -296,7 +308,7 @@ def run(self):
 async def tunnel_scheduler():
     logger.debug("start tunnel scheduler")
     connection = await asyncssh.connect(
-        "jhub.131.154.96.124.myip.cloud.infn.it",
+        "jhub.131.154.98.185.myip.cloud.infn.it",
         port=31022,
         username=name,
         password=token,
@@ -311,7 +323,7 @@ async def tunnel_scheduler():
 async def tunnel_dashboard():
     logger.debug("start tunnel dashboard")
     connection = await asyncssh.connect(
-        "jhub.131.154.96.124.myip.cloud.infn.it",
+        "jhub.131.154.98.185.myip.cloud.infn.it",
         port=31022,
         username=name,
         password=token,
@@ -326,7 +338,7 @@ async def tunnel_dashboard():
 async def tunnel_controller():
     logger.debug("start tunnel controller")
     connection = await asyncssh.connect(
-        "jhub.131.154.96.124.myip.cloud.infn.it",
+        "jhub.131.154.98.185.myip.cloud.infn.it",
         port=31022,
         username=name,
         password=token,
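
Note: tunnel_scheduler, tunnel_dashboard and tunnel_controller only differ in which port they expose, and all three now point at the updated gateway host. A sketch of a reverse port-forward with asyncssh, assuming that is how these tunnels are established (the forwarding call itself is outside the hunks shown here); name and token come from the surrounding code:

    import asyncssh

    async def open_reverse_tunnel(remote_port: int, local_port: int):
        connection = await asyncssh.connect(
            "jhub.131.154.98.185.myip.cloud.infn.it",
            port=31022,
            username=name,
            password=token,
            known_hosts=None,
        )
        # Ask the gateway to listen on remote_port and relay traffic back
        # to local_port on this machine.
        listener = await connection.forward_remote_port(
            "0.0.0.0", remote_port, "127.0.0.1", local_port
        )
        await listener.wait_closed()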
@@ -389,7 +401,7 @@ def get(self):
     def prepare(self):
         logger.debug(self.request.arguments)
 
-
+
 class LogsHandler(tornado.web.RequestHandler):
     def initialize(self, sched_q: Queue, controller_q: Queue):
         self.sched_q: Queue = sched_q
@@ -424,22 +436,18 @@ async def get(self):
               font-size: 15px;
               border-bottom:
             }
-
             .active, .collapsible:hover {
               background-color: #ec8f72;
             }
-
             .content {
               padding: 0 18px;
               display: none;
               overflow: hidden;
               background-color: #fafafa;
             }
-
             table, th, td {
               border: 1px solid black;
             }
-
             table {
               width: 100%;
             }
@@ -545,7 +553,6 @@ async def get(self):
             """<script>
             var coll = document.getElementsByClassName("collapsible");
             var i;
-
             for (i = 0; i < coll.length; i++) {
               coll[i].addEventListener("click", function() {
                 this.classList.toggle("active");
@@ -557,20 +564,16 @@ async def get(self):
                 }
               });
             }
-
             window.onscroll = function() {myFunction()};
-
             var header = document.getElementById("myHeader");
             var sticky = header.offsetTop;
-
             function myFunction() {
               if (window.pageYOffset > sticky) {
                 header.classList.add("sticky");
               } else {
                 header.classList.remove("sticky");
               }
             }
-
             var origin_location = window.location.href;
             function reload() {
               window.location.href = origin_location;
@@ -666,7 +669,6 @@ def initialize(self, sched_q: Queue, controller_q: Queue):
 
     def get(self):
         """Return a descriptive dictionary of worker specs.
-
         Example worker_spec:
         {
             "HTCondorCluster-0": {