From 0a7664c0515afff91df5f7a379c6dbad324465f2 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Fri, 6 Jun 2025 21:42:49 +1000 Subject: [PATCH 01/30] ftp server testing --- .gitignore | 1 + configs/lcs1/obs_config.py | 16 +++- devices/camera.py | 43 +++++++-- obs.py | 156 +++++++++++++++++++++++++++++++- subprocesses/fz_archive_file.py | 23 +++-- 5 files changed, 214 insertions(+), 25 deletions(-) diff --git a/.gitignore b/.gitignore index 06af92e84..9c9f3528a 100644 --- a/.gitignore +++ b/.gitignore @@ -31,3 +31,4 @@ output.txt aftersourceplots.png beforesourceplots.png brightstarplots.png +ftpsecrets.json diff --git a/configs/lcs1/obs_config.py b/configs/lcs1/obs_config.py index 5cd4414fd..0d2f922dd 100644 --- a/configs/lcs1/obs_config.py +++ b/configs/lcs1/obs_config.py @@ -82,13 +82,21 @@ 'always_do_a_centering_exposure_regardless_of_nearby_reference': True, # Setup of folders on local and network drives. - 'ingest_raws_directly_to_archive': True, - 'push_file_list_to_pipe_queue': True, # This being true means the above needs to be true also. + 'ingest_raws_directly_to_archive': False, + 'push_file_list_to_pipe_queue': False, # This being true means the above needs to be true also. # LINKS TO PIPE FOLDER - 'save_raws_to_pipe_folder_for_nightly_processing': False, + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'ftp', # Can also be 'ftp' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + + + + + 'pipe_archive_folder_path': 'X:/localptrarchive/', #WER changed Z to X 20231113 @1:16 UTC - 'temporary_local_pipe_archive_to_hold_files_while_copying' : 'F:/tempfolderforpipeline', + #'temporary_local_pipe_archive_to_hold_files_while_copying' : 'F:/tempfolderforpipeline', # Setup of folders on local and network drives. 
'client_hostname': 'LCC1', 'archive_path': 'C:/ptr/', diff --git a/devices/camera.py b/devices/camera.py index f5959c230..6f53a24f1 100644 --- a/devices/camera.py +++ b/devices/camera.py @@ -3920,7 +3920,7 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): token_name=token_name + str(self.next_seq) - if self.site_config['save_raws_to_pipe_folder_for_nightly_processing']: + if self.site_config['save_images_to_pipe_for_processing']: if len(real_time_files) > 0: # This is the failsafe directory.... if it can't be written to the PIPE folder @@ -3933,10 +3933,13 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): if not os.path.exists(failsafe_directory+ '/tokens'): os.umask(0) os.makedirs(failsafe_directory+ '/tokens') - + try: - pipetokenfolder = self.site_config['pipe_archive_folder_path'] + '/tokens' - if not os.path.exists(self.site_config['pipe_archive_folder_path'] + '/tokens'): + if self.site_config['pipe_save_method'] == 'ftp': + pipetokenfolder = self.site_config['ftp_ingestion_folder'] + else: + pipetokenfolder = self.site_config['pipe_archive_folder_path'] + '/tokens' + if not os.path.exists(pipetokenfolder): os.umask(0) os.makedirs(self.site_config['pipe_archive_folder_path'] + '/tokens', mode=0o777) @@ -3948,14 +3951,42 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): for tempfilename in real_time_files: temp_file_holder.append(tempfilename.replace('-EX00.', f'{suffix}-EX00.')) try: - with open(f"{pipetokenfolder}/{token_name}{suffix}", 'w') as f: + # with open(f"{pipetokenfolder}/{token_name}{suffix}", 'w') as f: + # json.dump(temp_file_holder, f, indent=2) + real_path = f"{pipetokenfolder}/{token_name}{suffix}" + temp_path = real_path + ".tmp" + + # 1. Write to “.tmp” + with open(temp_path, "w") as f: json.dump(temp_file_holder, f, indent=2) + f.flush() + os.fsync(f.fileno()) + + # 2. 
Rename to the real filename + os.replace(temp_path, real_path) + + if self.site_config['pipe_save_method'] == 'ftp': + g_dev['obs'].add_to_ftpqueue(pipetokenfolder, str(token_name)+str(suffix)) + except: plog(traceback.format_exc()) else: try: - with open(pipetokenfolder + "/" + token_name, 'w') as f: + # with open(pipetokenfolder + "/" + token_name, 'w') as f: + # json.dump(real_time_files, f, indent=2) + real_path = os.path.join(pipetokenfolder, token_name) + temp_path = real_path + ".tmp" + + # 1. Write into the “.tmp” file + with open(temp_path, "w") as f: json.dump(real_time_files, f, indent=2) + f.flush() + os.fsync(f.fileno()) + + # 2. Atomically replace (or create) the real file + os.replace(temp_path, real_path) + if self.site_config['pipe_save_method'] == 'ftp': + g_dev['obs'].add_to_ftpqueue(pipetokenfolder, token_name) except: plog(traceback.format_exc()) except: diff --git a/obs.py b/obs.py index 4d5c1fb2e..031f2b63e 100644 --- a/obs.py +++ b/obs.py @@ -12,7 +12,7 @@ from dotenv import load_dotenv load_dotenv(".env") import ocs_ingester.exceptions - +import ftplib from ocs_ingester.ingester import upload_file_and_ingest_to_archive from requests.adapters import HTTPAdapter, Retry @@ -34,7 +34,7 @@ import subprocess import pickle import argparse - +from queue import Empty from astropy.io import fits from astropy.utils.data import check_download_cache from astropy.coordinates import get_sun, SkyCoord, AltAz @@ -72,7 +72,40 @@ status_forcelist=[500, 502, 503, 504]) reqs.mount("http://", HTTPAdapter(max_retries=retries)) - +def ftp_upload_files(server, port, username, password, remote_dir, local_dir, filenames): + """ + Connects to the FTP server, changes to remote_dir, uploads each file in `filenames` + (which are relative to local_dir), then closes the connection. + """ + ftp = ftplib.FTP() + try: + # 1) Connect and log in + ftp.connect(server, port, timeout=30) # e.g. 
timeout=30 seconds + ftp.login(username, password) + + # 2) Switch to the target directory + ftp.cwd(remote_dir) + + # 3) Upload each file + for fname in filenames: + local_path = os.path.join(local_dir, fname) + if not os.path.isfile(local_path): + print(f" [!] Skipping {local_path}: not found on disk.") + continue + + with open(local_path, "rb") as f: + # 'STOR fname' will store the file with the same name on the remote side + ftp.storbinary(f"STOR {fname}", f) + print(f" Uploaded: {fname}") + + except ftplib.all_errors as e: + print(f"[ERROR] FTP upload failed: {e}") + finally: + # 4) Always quit (closing the connection) + try: + ftp.quit() + except Exception: + ftp.close() def test_connect(host="http://google.com"): # This just tests the net connection @@ -585,6 +618,26 @@ def create_directories(base_path, camera_name, subdirectories): self.reporttonightlog_queue_thread.start() + if self.config['save_images_to_pipe_for_processing'] and self.config['pipe_save_method'] == 'ftp': + + with open('ftpsecrets.json', "r") as f: + cfg = json.load(f) + + self.fitserver = cfg["SERVER"] + self.ftpport = cfg["PORT"] + self.ftpusername = cfg["USERNAME"] + self.ftppassword = cfg["PASSWORD"] + self.ftpremotedir = cfg["REMOTE_DIR"] + + self.ftp_queue = queue.Queue(maxsize=0) + self.ftp_queue_thread = threading.Thread( + target=self.ftp_process, args=() + ) + self.ftp_queue_thread.daemon = True + self.ftp_queue_thread.start() + + + self.queue_reporting_period = 60 @@ -3008,7 +3061,99 @@ def reporttonightlog_process(self): else: time.sleep(0.25) + # def ftp_process(self): + # """This is a thread where files are ingested by ftp to a pipe server + # """ + + # while True: + # if not self.ftp_queue.empty(): + # while not self.ftp_queue.empty(): + # try: + # (filedirectory, filename, timestamp) = self.ftp_queue.get(block=False) + + + + # ftp_upload_files(self.fitserver ,self.ftpport, self.ftpusername, self.ftppassword , self.ftpremotedir, filedirectory, filename) + # except: + # 
plog("Night Log did not write, usually not fatal.") + # plog(traceback.format_exc()) + + # self.ftp_queue.task_done() + # else: + # time.sleep(0.25) + + def ftp_process(self): + """This is a thread where files are ingested by ftp to a pipe server.""" + while True: + # ─── 1) Scan ingestion folder for new `.fits.fz` files ─── + try: + ingestion_folder = self.site_config['ftp_ingestion_folder'] + entries = os.listdir(ingestion_folder) + except Exception as e: + plog(f"Could not list '{ingestion_folder}': {e}") + entries = [] + + # Grab a snapshot of everything already in the queue + # (since queue.Queue() doesn't have a direct 'contains' method, + # we peek at its internal deque via .queue) + try: + queued_items = list(self.ftp_queue.queue) + except Exception: + queued_items = [] + + for fname in entries: + if not fname.endswith('.fits.fz'): + continue + + full_path = os.path.join(ingestion_folder, fname) + + # Check if (ingestion_folder, fname) is already queued + already_queued = any( + (item[0] == ingestion_folder and item[1] == fname) + for item in queued_items + if isinstance(item, tuple) and len(item) >= 2 + ) + if not already_queued: + try: + # Use file‐modification time as the “timestamp” + ts = os.path.getmtime(full_path) + except Exception: + ts = time.time() + + # Enqueue a tuple: (filedirectory, filename, timestamp) + self.ftp_queue.put((ingestion_folder, fname, ts)) + plog(f"Enqueued new FTP file: {fname}") + + # ─── 2) Once scanning is done, process whatever is in ftp_queue ─── + if not self.ftp_queue.empty(): + # As long as there are items, keep pulling them off + while True: + try: + (filedirectory, filename, timestamp) = self.ftp_queue.get(block=False) + except Empty: + # No more items right now; break back to top of outer loop + break + + try: + ftp_upload_files( + self.fitserver, + self.ftpport, + self.ftpusername, + self.ftppassword, + self.ftpremotedir, + filedirectory, + filename + ) + except Exception: + plog("Night Log did not write, usually 
not fatal.") + plog(traceback.format_exc()) + + # Mark this item as done + self.ftp_queue.task_done() + else: + # If queue was empty (and after scanning), sleep briefly + time.sleep(0.25) def platesolve_process(self): @@ -4293,9 +4438,10 @@ def send_to_user(self, p_log, p_level="INFO"): self.sendtouser_queue.put((p_log, p_level), block=False) def report_to_nightlog(self, log): - # This is now a queue--- it was actually slowing - # everything down each time this was called! self.reporttonightlog_queue.put((log, time.time()), block=False) + + def add_to_ftpqueue(self, filedirectory, filename): + self.ftp_queue.put((filedirectory, filename, time.time()), block=False) def check_platesolve_and_nudge(self, no_confirmation=True): """ diff --git a/subprocesses/fz_archive_file.py b/subprocesses/fz_archive_file.py index 187bbe78f..59359b035 100644 --- a/subprocesses/fz_archive_file.py +++ b/subprocesses/fz_archive_file.py @@ -46,7 +46,7 @@ def print(*args): #### FZ Compression can't handle NAN so we need to use a sentinal value -#### In our case, we use -512.3456789. This is low enough that it is highly +#### In our case, we use -251.2345733642578. 
This is low enough that it is highly #### unlikely that a pixel would have this real value in the history of the universe #### But not so low it is impossible to use fits browsers actual_data=np.array(slow_process[2],dtype=np.float32) @@ -75,19 +75,22 @@ def print(*args): # BUT it actually compresses to the same size either way temphduheader["BZERO"] = 0 # Make sure there is no integer scaling left over temphduheader["BSCALE"] = 1 # Make sure there is no integer scaling left over -if selfconfig['save_raws_to_pipe_folder_for_nightly_processing']: +if selfconfig["save_images_to_pipe_for_processing"]: if not os.path.exists(failsafe_directory): os.umask(0) os.makedirs(failsafe_directory) try: - pipefolder = selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME']) +'/'+ str(temphduheader['DAY-OBS']) - if not os.path.exists(selfconfig['pipe_archive_folder_path']+'/'+ str(temphduheader['INSTRUME'])): - os.umask(0) - os.makedirs(selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME'])) - if not os.path.exists(selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME']) +'/'+ str(temphduheader['DAY-OBS'])): - os.umask(0) - os.makedirs(selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME']) +'/'+ str(temphduheader['DAY-OBS'])) + if selfconfig['pipe_save_method'] == 'ftp': + pipefolder = selfconfig['ftp_ingestion_folder'] + else: + pipefolder = selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME']) +'/'+ str(temphduheader['DAY-OBS']) + if not os.path.exists(selfconfig['pipe_archive_folder_path']+'/'+ str(temphduheader['INSTRUME'])): + os.umask(0) + os.makedirs(selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME'])) + if not os.path.exists(selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME']) +'/'+ str(temphduheader['DAY-OBS'])): + os.umask(0) + os.makedirs(selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME']) +'/'+ 
str(temphduheader['DAY-OBS'])) except: print ("looks like an error making the pipe archive folder path") @@ -183,7 +186,7 @@ def print(*args): ) del actual_data - if selfconfig['save_raws_to_pipe_folder_for_nightly_processing']: + if selfconfig["save_images_to_pipe_for_processing"]: try: hdufz.writeto( pipefolder + '/' + str(temphduheader['ORIGNAME']).replace('.fits.fz','.tempfits.fz'), overwrite=True From d3703a70a9e7171704154b4af3f0df4a9c2c4964 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Sat, 7 Jun 2025 12:15:42 +1000 Subject: [PATCH 02/30] ftp stuff --- devices/sequencer.py | 13 ++- obs.py | 179 +++++++++++++++++++++++++++----- subprocesses/fz_archive_file.py | 10 +- 3 files changed, 169 insertions(+), 33 deletions(-) diff --git a/devices/sequencer.py b/devices/sequencer.py index 0a64e6295..0d23a1232 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -289,7 +289,7 @@ def attempt_to_copy_failed_pipe_files(self): def copy_failed_pipe_files_thread(self): - if self.config['save_raws_to_pipe_folder_for_nightly_processing']: + if self.config['save_images_to_pipe_for_processing']: try: failsafe_directory=self.config['archive_path'] + 'failsafe' if not os.path.exists(failsafe_directory): @@ -326,11 +326,16 @@ def copy_failed_pipe_files_thread(self): shutil.move(tempfile, pipefolder) except: plog(traceback.format_exc()) + + if self.config['pipe_save_method'] == 'ftp': + pipetokenfolder = self.config['ftp_ingestion_folder'] + else: + pipetokenfolder = self.config['pipe_archive_folder_path'] + '/tokens' - pipetokenfolder = self.config['pipe_archive_folder_path'] + '/tokens' - if not os.path.exists(self.config['pipe_archive_folder_path'] + '/tokens'): + # pipetokenfolder = self.config['pipe_archive_folder_path'] + '/tokens' + if not os.path.exists(pipetokenfolder): os.umask(0) - os.makedirs(self.config['pipe_archive_folder_path'] + '/tokens', mode=0o777) + os.makedirs(pipetokenfolder, mode=0o777) # Copy over the token files diff --git a/obs.py b/obs.py index 
031f2b63e..f1706763b 100644 --- a/obs.py +++ b/obs.py @@ -72,40 +72,170 @@ status_forcelist=[500, 502, 503, 504]) reqs.mount("http://", HTTPAdapter(max_retries=retries)) -def ftp_upload_files(server, port, username, password, remote_dir, local_dir, filenames): +# def ftp_upload_files(server, port, username, password, remote_dir, local_dir, filenames): +# """ +# Connects to the FTP server, changes to remote_dir, uploads each file in `filenames` +# (which are relative to local_dir), then closes the connection. +# """ +# ftp = ftplib.FTP() +# try: +# # 1) Connect and log in +# ftp.connect(server, port, timeout=30) # e.g. timeout=30 seconds +# ftp.login(username, password) + +# # 2) Switch to the target directory +# ftp.cwd(remote_dir) + +# # 3) Upload each file +# for fname in filenames: +# local_path = os.path.join(local_dir, fname) +# if not os.path.isfile(local_path): +# print(f" [!] Skipping {local_path}: not found on disk.") +# continue + +# with open(local_path, "rb") as f: +# # 'STOR fname' will store the file with the same name on the remote side +# ftp.storbinary(f"STOR {fname}", f) +# print(f" Uploaded: {fname}") + + +# except: + +# plog(traceback.format_exc()) +# breakpoint() + +# # except ftplib.all_errors as e: +# # print(f"[ERROR] FTP upload failed: {e}") +# finally: +# # 4) Always quit (closing the connection) +# try: +# ftp.quit() +# except Exception: +# ftp.close() + + +# def ftp_upload_files( +# server: str, +# port: int, +# username: str, +# password: str, +# remote_dir: str, +# local_dir: str, +# filenames: list[str], +# use_tls: bool = False +# ): +# """ +# Connects to the FTP server, changes to remote_dir, uploads each file in `filenames` +# (which are relative to local_dir), then closes the connection. + +# If use_tls is True, will use FTP_TLS instead of plain FTP. 
+# """ +# # Choose FTP or FTP_TLS based on whether vsftpd is set up for TLS +# if use_tls: +# ftp = ftplib.FTP_TLS() +# else: +# ftp = ftplib.FTP() + +# try: +# # 1) Enable debug output (very helpful for diagnosing protocol failures) +# ftp.set_debuglevel(2) + +# # 2) Connect and log in +# ftp.connect(server, port, timeout=30) +# ftp.login(username, password) + +# # 3) If using FTP_TLS, switch to secure data channel +# if use_tls: +# ftp.prot_p() # “Protection Level: Private” for data + +# # 4) Make sure you’re in the correct transfer mode (passive/active) +# # By default ftplib is passive, but explicitly set it: +# ftp.set_pasv(True) +# # If passive keeps hanging, try ftp.set_pasv(False) instead. + +# # 5) Switch to the target directory +# ftp.cwd(remote_dir) + +# # 6) Upload each file +# for fname in filenames: +# local_path = os.path.join(local_dir, fname) +# if not os.path.isfile(local_path): +# print(f" [!] Skipping {local_path}: not found on disk.") +# continue + +# with open(local_path, "rb") as f: +# # 'STOR ' stores the file under the same name +# ftp.storbinary(f"STOR {fname}", f) +# print(f" Uploaded: {fname}") + +# except ftplib.all_errors as e: +# print("[ERROR] FTP upload failed:") +# traceback.print_exc() +# breakpoint() +# finally: +# # 7) Always quit (closing the connection) +# try: +# ftp.quit() +# except Exception: +# ftp.close() + +import ftputil +import os + +def ftp_upload_with_ftputil( + server: str, + port: int, + username: str, + password: str, + remote_dir: str, + local_dir: str, + filenames: list[str], + use_passive: bool = True, + timeout: int = 30 +): """ - Connects to the FTP server, changes to remote_dir, uploads each file in `filenames` - (which are relative to local_dir), then closes the connection. + Uses ftputil to connect and upload a list of files. """ - ftp = ftplib.FTP() + # ftputil’s FTPHost automatically handles connect/login under the hood. + try: - # 1) Connect and log in - ftp.connect(server, port, timeout=30) # e.g. 
timeout=30 seconds - ftp.login(username, password) + host = ftputil.FTPHost( + host=server, + user=username, + passwd=password, + port=port, + timeout=timeout, + ) + # Toggle passive mode if needed (default is True) + host.session.set_pasv(use_passive) - # 2) Switch to the target directory - ftp.cwd(remote_dir) + except: + plog(traceback.format_exc()) + breakpoint() + + try: + # Ensure the remote directory exists (mkdirs=True will create nested dirs) + host.makedirs(remote_dir, exist_ok=True) - # 3) Upload each file for fname in filenames: local_path = os.path.join(local_dir, fname) if not os.path.isfile(local_path): - print(f" [!] Skipping {local_path}: not found on disk.") + print(f"[!] Skipping {local_path} (not found).") continue - with open(local_path, "rb") as f: - # 'STOR fname' will store the file with the same name on the remote side - ftp.storbinary(f"STOR {fname}", f) - print(f" Uploaded: {fname}") + remote_path = host.path.join(remote_dir, fname) + print(f"Uploading {local_path} → {remote_path}…", end=" ") + # upload_if_newer only sends if local is newer or remote missing; + # you can also use host.upload(local_path, remote_path) to force. 
+ host.upload_if_newer(local_path, remote_path) + print("done.") - except ftplib.all_errors as e: - print(f"[ERROR] FTP upload failed: {e}") + + + except Exception as e: + print("[ERROR] ftputil upload failed:", e) finally: - # 4) Always quit (closing the connection) - try: - ftp.quit() - except Exception: - ftp.close() + host.close() def test_connect(host="http://google.com"): # This just tests the net connection @@ -3088,7 +3218,7 @@ def ftp_process(self): while True: # ─── 1) Scan ingestion folder for new `.fits.fz` files ─── try: - ingestion_folder = self.site_config['ftp_ingestion_folder'] + ingestion_folder = self.config['ftp_ingestion_folder'] entries = os.listdir(ingestion_folder) except Exception as e: plog(f"Could not list '{ingestion_folder}': {e}") @@ -3136,7 +3266,8 @@ def ftp_process(self): break try: - ftp_upload_files( + print ("TRYING FTP") + ftp_upload_with_ftputil( self.fitserver, self.ftpport, self.ftpusername, diff --git a/subprocesses/fz_archive_file.py b/subprocesses/fz_archive_file.py index 59359b035..f07adbee0 100644 --- a/subprocesses/fz_archive_file.py +++ b/subprocesses/fz_archive_file.py @@ -255,7 +255,7 @@ def print(*args): np.array(newhdured, dtype=np.float32), temphduheader ) - if selfconfig['save_raws_to_pipe_folder_for_nightly_processing']: + if selfconfig['save_images_to_pipe_for_processing']: try: @@ -298,7 +298,7 @@ def print(*args): np.array(GTRonly, dtype=np.float32), temphduheader ) - if selfconfig['save_raws_to_pipe_folder_for_nightly_processing']: + if selfconfig['save_images_to_pipe_for_processing']: try: hdufz.writeto(pipefolder + '/' + str(temphduheader['ORIGNAME'].replace('.fits','.tempfits')), overwrite=True) os.rename(pipefolder + '/' + str(temphduheader['ORIGNAME']).replace('.fits','.tempfits'),pipefolder + '/' + str(temphduheader['ORIGNAME'])) @@ -335,7 +335,7 @@ def print(*args): np.array(GBLonly, dtype=np.float32), temphduheader ) - if selfconfig['save_raws_to_pipe_folder_for_nightly_processing']: + if 
selfconfig['save_images_to_pipe_for_processing']: try: hdufz.writeto( pipefolder + '/' + str(temphduheader['ORIGNAME'].replace('.fits','.tempfits')), overwrite=True @@ -376,7 +376,7 @@ def print(*args): np.array(newhdublue, dtype=np.float32), temphduheader ) - if selfconfig['save_raws_to_pipe_folder_for_nightly_processing']: + if selfconfig['save_images_to_pipe_for_processing']: hdufz.writeto( pipefolder + '/' + str(temphduheader['ORIGNAME']).replace('.fits','.tempfits'), overwrite=True @@ -412,7 +412,7 @@ def print(*args): np.array(clearV, dtype=np.float32), temphduheader ) - if selfconfig['save_raws_to_pipe_folder_for_nightly_processing']: + if selfconfig['save_images_to_pipe_for_processing']: try: hdufz.writeto( pipefolder + '/' + str(temphduheader['ORIGNAME']).replace('.fits','.tempfits'), overwrite=True From 5e7c52eada61b3e81b39b9fbac9767caa39e9ad4 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Sat, 7 Jun 2025 16:21:41 +1000 Subject: [PATCH 03/30] Update .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 9c9f3528a..9d265e328 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,4 @@ aftersourceplots.png beforesourceplots.png brightstarplots.png ftpsecrets.json +httpsecrets.json From 2ec8f92930322c5930c03b88cf48df9c3221e681 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Sat, 7 Jun 2025 16:24:53 +1000 Subject: [PATCH 04/30] http ingestion --- configs/lcs1/obs_config.py | 3 +- devices/camera.py | 6 + obs.py | 239 +++++++++++++++----------------- subprocesses/fz_archive_file.py | 2 + 4 files changed, 120 insertions(+), 130 deletions(-) diff --git a/configs/lcs1/obs_config.py b/configs/lcs1/obs_config.py index 0d2f922dd..7c3641f0a 100644 --- a/configs/lcs1/obs_config.py +++ b/configs/lcs1/obs_config.py @@ -87,9 +87,10 @@ # LINKS TO PIPE FOLDER 'save_images_to_pipe_for_processing': True, - 'pipe_save_method': 'ftp', # Can also be 'ftp' for that transfer but also 'local' pipe for a local LAN pipe server + 
'pipe_save_method': 'http', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + 'http_ingestion_folder': 'C:/http_ingestion/', diff --git a/devices/camera.py b/devices/camera.py index 6f53a24f1..e03bd80bd 100644 --- a/devices/camera.py +++ b/devices/camera.py @@ -3937,6 +3937,8 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): try: if self.site_config['pipe_save_method'] == 'ftp': pipetokenfolder = self.site_config['ftp_ingestion_folder'] + elif self.site_config['pipe_save_method'] == 'http': + pipetokenfolder = self.site_config['http_ingestion_folder'] else: pipetokenfolder = self.site_config['pipe_archive_folder_path'] + '/tokens' if not os.path.exists(pipetokenfolder): @@ -3967,6 +3969,8 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): if self.site_config['pipe_save_method'] == 'ftp': g_dev['obs'].add_to_ftpqueue(pipetokenfolder, str(token_name)+str(suffix)) + if self.site_config['pipe_save_method'] == 'http': + g_dev['obs'].add_to_httpqueue(pipetokenfolder, str(token_name)+str(suffix)) except: plog(traceback.format_exc()) @@ -3987,6 +3991,8 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): os.replace(temp_path, real_path) if self.site_config['pipe_save_method'] == 'ftp': g_dev['obs'].add_to_ftpqueue(pipetokenfolder, token_name) + elif self.site_config['pipe_save_method'] == 'http': + g_dev['obs'].add_to_httpqueue(pipetokenfolder, token_name) except: plog(traceback.format_exc()) except: diff --git a/obs.py b/obs.py index f1706763b..a8829660e 100644 --- a/obs.py +++ b/obs.py @@ -72,115 +72,24 @@ status_forcelist=[500, 502, 503, 504]) reqs.mount("http://", HTTPAdapter(max_retries=retries)) -# def ftp_upload_files(server, port, username, password, remote_dir, local_dir, filenames): -# """ -# Connects to the FTP server, changes to remote_dir, uploads each file in `filenames` -# 
(which are relative to local_dir), then closes the connection. -# """ -# ftp = ftplib.FTP() -# try: -# # 1) Connect and log in -# ftp.connect(server, port, timeout=30) # e.g. timeout=30 seconds -# ftp.login(username, password) - -# # 2) Switch to the target directory -# ftp.cwd(remote_dir) - -# # 3) Upload each file -# for fname in filenames: -# local_path = os.path.join(local_dir, fname) -# if not os.path.isfile(local_path): -# print(f" [!] Skipping {local_path}: not found on disk.") -# continue - -# with open(local_path, "rb") as f: -# # 'STOR fname' will store the file with the same name on the remote side -# ftp.storbinary(f"STOR {fname}", f) -# print(f" Uploaded: {fname}") - - -# except: - -# plog(traceback.format_exc()) -# breakpoint() - -# # except ftplib.all_errors as e: -# # print(f"[ERROR] FTP upload failed: {e}") -# finally: -# # 4) Always quit (closing the connection) -# try: -# ftp.quit() -# except Exception: -# ftp.close() - - -# def ftp_upload_files( -# server: str, -# port: int, -# username: str, -# password: str, -# remote_dir: str, -# local_dir: str, -# filenames: list[str], -# use_tls: bool = False -# ): -# """ -# Connects to the FTP server, changes to remote_dir, uploads each file in `filenames` -# (which are relative to local_dir), then closes the connection. +import ftputil + + + + +def http_upload(server, filedirectory,filename): -# If use_tls is True, will use FTP_TLS instead of plain FTP. 
-# """ -# # Choose FTP or FTP_TLS based on whether vsftpd is set up for TLS -# if use_tls: -# ftp = ftplib.FTP_TLS() -# else: -# ftp = ftplib.FTP() + url = "http://110.143.205.252/archive_upload" + files = {"file": open(filedirectory +'/'+filename,"rb")} + data = {"target_dir": "fromsite"} + + try: + resp = reqs.post(url, files=files, data=data) + print(resp.status_code, resp.json()) + except: + plog(traceback.format_exc()) + breakpoint() -# try: -# # 1) Enable debug output (very helpful for diagnosing protocol failures) -# ftp.set_debuglevel(2) - -# # 2) Connect and log in -# ftp.connect(server, port, timeout=30) -# ftp.login(username, password) - -# # 3) If using FTP_TLS, switch to secure data channel -# if use_tls: -# ftp.prot_p() # “Protection Level: Private” for data - -# # 4) Make sure you’re in the correct transfer mode (passive/active) -# # By default ftplib is passive, but explicitly set it: -# ftp.set_pasv(True) -# # If passive keeps hanging, try ftp.set_pasv(False) instead. - -# # 5) Switch to the target directory -# ftp.cwd(remote_dir) - -# # 6) Upload each file -# for fname in filenames: -# local_path = os.path.join(local_dir, fname) -# if not os.path.isfile(local_path): -# print(f" [!] 
Skipping {local_path}: not found on disk.") -# continue - -# with open(local_path, "rb") as f: -# # 'STOR ' stores the file under the same name -# ftp.storbinary(f"STOR {fname}", f) -# print(f" Uploaded: {fname}") - -# except ftplib.all_errors as e: -# print("[ERROR] FTP upload failed:") -# traceback.print_exc() -# breakpoint() -# finally: -# # 7) Always quit (closing the connection) -# try: -# ftp.quit() -# except Exception: -# ftp.close() - -import ftputil -import os def ftp_upload_with_ftputil( server: str, @@ -765,6 +674,24 @@ def create_directories(base_path, camera_name, subdirectories): ) self.ftp_queue_thread.daemon = True self.ftp_queue_thread.start() + + if self.config['save_images_to_pipe_for_processing'] and self.config['pipe_save_method'] == 'http': + + with open('httpsecrets.json', "r") as f: + cfg = json.load(f) + + self.fitserver = cfg["SERVER"] + # self.ftpport = cfg["PORT"] + # self.ftpusername = cfg["USERNAME"] + # self.ftppassword = cfg["PASSWORD"] + # self.ftpremotedir = cfg["REMOTE_DIR"] + + self.http_queue = queue.Queue(maxsize=0) + self.http_queue_thread = threading.Thread( + target=self.http_process, args=() + ) + self.http_queue_thread.daemon = True + self.http_queue_thread.start() @@ -3191,28 +3118,6 @@ def reporttonightlog_process(self): else: time.sleep(0.25) - # def ftp_process(self): - # """This is a thread where files are ingested by ftp to a pipe server - # """ - - # while True: - # if not self.ftp_queue.empty(): - # while not self.ftp_queue.empty(): - # try: - # (filedirectory, filename, timestamp) = self.ftp_queue.get(block=False) - - - - # ftp_upload_files(self.fitserver ,self.ftpport, self.ftpusername, self.ftppassword , self.ftpremotedir, filedirectory, filename) - - # except: - # plog("Night Log did not write, usually not fatal.") - # plog(traceback.format_exc()) - - # self.ftp_queue.task_done() - # else: - # time.sleep(0.25) - def ftp_process(self): """This is a thread where files are ingested by ftp to a pipe server.""" 
while True: @@ -3285,6 +3190,79 @@ def ftp_process(self): else: # If queue was empty (and after scanning), sleep briefly time.sleep(0.25) + + def http_process(self): + """This is a thread where files are ingested by http to a pipe server.""" + while True: + # ─── 1) Scan ingestion folder for new `.fits.fz` files ─── + try: + ingestion_folder = self.config['http_ingestion_folder'] + entries = os.listdir(ingestion_folder) + except Exception as e: + plog(f"Could not list '{ingestion_folder}': {e}") + entries = [] + + # Grab a snapshot of everything already in the queue + # (since queue.Queue() doesn't have a direct 'contains' method, + # we peek at its internal deque via .queue) + try: + queued_items = list(self.http_queue.queue) + except Exception: + queued_items = [] + + for fname in entries: + if not fname.endswith('.fits.fz'): + continue + + full_path = os.path.join(ingestion_folder, fname) + + # Check if (ingestion_folder, fname) is already queued + already_queued = any( + (item[0] == ingestion_folder and item[1] == fname) + for item in queued_items + if isinstance(item, tuple) and len(item) >= 2 + ) + if not already_queued: + try: + # Use file‐modification time as the “timestamp” + ts = os.path.getmtime(full_path) + except Exception: + ts = time.time() + + # Enqueue a tuple: (filedirectory, filename, timestamp) + self.http_queue.put((ingestion_folder, fname, ts)) + plog(f"Enqueued new HTTP file: {fname}") + + # ─── 2) Once scanning is done, process whatever is in http_queue ─── + if not self.http_queue.empty(): + # As long as there are items, keep pulling them off + while True: + try: + (filedirectory, filename, timestamp) = self.http_queue.get(block=False) + except Empty: + # No more items right now; break back to top of outer loop + break + + try: + print ("TRYING HTTP") + http_upload( + self.fitserver, + # self.ftpport, + # self.ftpusername, + # self.ftppassword, + # self.ftpremotedir, + filedirectory, + filename + ) + except Exception: + plog("Night Log did 
not write, usually not fatal.") + plog(traceback.format_exc()) + + # Mark this item as done + self.http_queue.task_done() + else: + # If queue was empty (and after scanning), sleep briefly + time.sleep(0.25) def platesolve_process(self): @@ -4573,6 +4551,9 @@ def report_to_nightlog(self, log): def add_to_ftpqueue(self, filedirectory, filename): self.ftp_queue.put((filedirectory, filename, time.time()), block=False) + + def add_to_httpqueue(self, filedirectory, filename): + self.http_queue.put((filedirectory, filename, time.time()), block=False) def check_platesolve_and_nudge(self, no_confirmation=True): """ diff --git a/subprocesses/fz_archive_file.py b/subprocesses/fz_archive_file.py index f07adbee0..4f67a8240 100644 --- a/subprocesses/fz_archive_file.py +++ b/subprocesses/fz_archive_file.py @@ -83,6 +83,8 @@ def print(*args): try: if selfconfig['pipe_save_method'] == 'ftp': pipefolder = selfconfig['ftp_ingestion_folder'] + elif selfconfig['pipe_save_method'] == 'http': + pipefolder = selfconfig['http_ingestion_folder'] else: pipefolder = selfconfig['pipe_archive_folder_path'] +'/'+ str(temphduheader['INSTRUME']) +'/'+ str(temphduheader['DAY-OBS']) if not os.path.exists(selfconfig['pipe_archive_folder_path']+'/'+ str(temphduheader['INSTRUME'])): From 26bf8e10b1349f035c02366521a8b750927e9022 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Sat, 7 Jun 2025 16:58:54 +1000 Subject: [PATCH 05/30] http ingestion working at LCS1 --- devices/camera.py | 12 ++++++------ devices/sequencer.py | 2 ++ obs.py | 35 ++++++++++++++++++++++++++++++----- 3 files changed, 38 insertions(+), 11 deletions(-) diff --git a/devices/camera.py b/devices/camera.py index e03bd80bd..4fe8aa89c 100644 --- a/devices/camera.py +++ b/devices/camera.py @@ -3955,7 +3955,7 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): try: # with open(f"{pipetokenfolder}/{token_name}{suffix}", 'w') as f: # json.dump(temp_file_holder, f, indent=2) - real_path = 
f"{pipetokenfolder}/{token_name}{suffix}" + real_path = f"{pipetokenfolder}/{token_name}{suffix}.json" temp_path = real_path + ".tmp" # 1. Write to “.tmp” @@ -3968,9 +3968,9 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): os.replace(temp_path, real_path) if self.site_config['pipe_save_method'] == 'ftp': - g_dev['obs'].add_to_ftpqueue(pipetokenfolder, str(token_name)+str(suffix)) + g_dev['obs'].add_to_ftpqueue(pipetokenfolder, str(token_name)+str(suffix)+'.json') if self.site_config['pipe_save_method'] == 'http': - g_dev['obs'].add_to_httpqueue(pipetokenfolder, str(token_name)+str(suffix)) + g_dev['obs'].add_to_httpqueue(pipetokenfolder, str(token_name)+str(suffix)+'.json') except: plog(traceback.format_exc()) @@ -3978,7 +3978,7 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): try: # with open(pipetokenfolder + "/" + token_name, 'w') as f: # json.dump(real_time_files, f, indent=2) - real_path = os.path.join(pipetokenfolder, token_name) + real_path = os.path.join(pipetokenfolder, token_name +'.json') temp_path = real_path + ".tmp" # 1. Write into the “.tmp” file @@ -3990,9 +3990,9 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): # 2. 
Atomically replace (or create) the real file os.replace(temp_path, real_path) if self.site_config['pipe_save_method'] == 'ftp': - g_dev['obs'].add_to_ftpqueue(pipetokenfolder, token_name) + g_dev['obs'].add_to_ftpqueue(pipetokenfolder, token_name+'.json') elif self.site_config['pipe_save_method'] == 'http': - g_dev['obs'].add_to_httpqueue(pipetokenfolder, token_name) + g_dev['obs'].add_to_httpqueue(pipetokenfolder, token_name+'.json') except: plog(traceback.format_exc()) except: diff --git a/devices/sequencer.py b/devices/sequencer.py index 0d23a1232..fc8bf8cc4 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -329,6 +329,8 @@ def copy_failed_pipe_files_thread(self): if self.config['pipe_save_method'] == 'ftp': pipetokenfolder = self.config['ftp_ingestion_folder'] + elif self.config['pipe_save_method'] == 'http': + pipetokenfolder = self.config['http_ingestion_folder'] else: pipetokenfolder = self.config['pipe_archive_folder_path'] + '/tokens' diff --git a/obs.py b/obs.py index a8829660e..c541e1356 100644 --- a/obs.py +++ b/obs.py @@ -79,16 +79,34 @@ def http_upload(server, filedirectory,filename): - url = "http://110.143.205.252/archive_upload" - files = {"file": open(filedirectory +'/'+filename,"rb")} + files = {"file": open(str(filedirectory +'/'+filename).replace('//','/'),"rb")} data = {"target_dir": "fromsite"} + print (files) + + #breakpoint() + + + try: - resp = reqs.post(url, files=files, data=data) - print(resp.status_code, resp.json()) + resp = reqs.post(server, files=files, data=data) + print("HTTP", resp.status_code) + ct = resp.headers.get("Content-Type", "") + + if resp.ok and "application/json" in ct: + # only parse JSON if it really is JSON + print(resp.json()) + else: + # fallback to raw text so you can see the error + print(resp.text) + return True except: plog(traceback.format_exc()) breakpoint() + + return False + + def ftp_upload_with_ftputil( @@ -3245,7 +3263,7 @@ def http_process(self): try: print ("TRYING HTTP") - http_upload( 
+ success=http_upload( self.fitserver, # self.ftpport, # self.ftpusername, @@ -3254,6 +3272,13 @@ def http_process(self): filedirectory, filename ) + + if success: + try: + os.remove(filedirectory + '/' + filename) + except: + plog ("couldn't remove " + filedirectory + '/' + filename) + except Exception: plog("Night Log did not write, usually not fatal.") plog(traceback.format_exc()) From f32c5a7e6f4b11abf177c46415ff2f668ecfb287 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Sat, 7 Jun 2025 19:44:47 +1000 Subject: [PATCH 06/30] Update post_exposure_subprocess.py --- subprocesses/post_exposure_subprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/subprocesses/post_exposure_subprocess.py b/subprocesses/post_exposure_subprocess.py index a0bbae9a6..64d6e67e6 100644 --- a/subprocesses/post_exposure_subprocess.py +++ b/subprocesses/post_exposure_subprocess.py @@ -935,7 +935,7 @@ def write_raw_file_out(packet): # While we wait for the platesolving to happen we do all the other stuff # And we will pick up the solution towards the end. - + hdu.header["ORIGIN"] = ("COMMUNITY", 'Organization responsible for the data')) # assign the keyword values and comment of the keyword as a tuple to write both to header. hdu.header["BUNIT"] = ("adu", "Unit of array values") From 779e47419ab404ab8c059810622563c1f17bc511 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Sun, 8 Jun 2025 08:07:46 +1000 Subject: [PATCH 07/30] typo --- subprocesses/post_exposure_subprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/subprocesses/post_exposure_subprocess.py b/subprocesses/post_exposure_subprocess.py index 64d6e67e6..dafda1095 100644 --- a/subprocesses/post_exposure_subprocess.py +++ b/subprocesses/post_exposure_subprocess.py @@ -935,7 +935,7 @@ def write_raw_file_out(packet): # While we wait for the platesolving to happen we do all the other stuff # And we will pick up the solution towards the end. 
- hdu.header["ORIGIN"] = ("COMMUNITY", 'Organization responsible for the data')) + hdu.header["ORIGIN"] = ("COMMUNITY", 'Organization responsible for the data') # assign the keyword values and comment of the keyword as a tuple to write both to header. hdu.header["BUNIT"] = ("adu", "Unit of array values") From 8ffab2d085ca00bc48d650b5bdc41e8397bb9105 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Wed, 18 Jun 2025 10:44:34 +1000 Subject: [PATCH 08/30] Update sequencer.py --- devices/sequencer.py | 54 +++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/devices/sequencer.py b/devices/sequencer.py index fc8bf8cc4..d6c4a08d7 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -2854,8 +2854,9 @@ def make_scaled_dark(self,input_folder, filename_start, masterBias, shapeImage, if g_dev['obs'].config['save_archive_versions_of_final_calibrations']: g_dev['obs'].to_slow_process(200000000, ('fits_file_save', g_dev['obs'].calib_masters_folder + 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + filename_start+'_master_bin1.fits', copy.deepcopy(masterDark), calibhduheader, g_dev['obs'].calib_masters_folder, 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + filename_start+'_master_bin1.fits' )) - if g_dev['obs'].config['save_raws_to_pipe_folder_for_nightly_processing']: - g_dev['obs'].to_slow_process(200000000, ('numpy_array_save',pipefolder + '/'+tempfrontcalib + filename_start+'_master_bin1.npy',copy.deepcopy(masterDark))) + if g_dev['obs'].config['save_images_to_pipe_for_processing']: + if g_dev['obs'].config['pipe_save_method'] == 'local': + g_dev['obs'].to_slow_process(200000000, ('numpy_array_save',pipefolder + '/'+tempfrontcalib + filename_start+'_master_bin1.npy',copy.deepcopy(masterDark))) except Exception as e: plog(traceback.format_exc()) @@ -2993,8 +2994,9 @@ def make_bias_dark(self,input_folder, filename_start, masterBias, shapeImage, ar g_dev['obs'].to_slow_process(200000000, ('fits_file_save', 
g_dev['obs'].calib_masters_folder + 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + filename_start+'_master_bin1.fits', copy.deepcopy(masterDark.astype(np.uint16)), calibhduheader, g_dev['obs'].calib_masters_folder, 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + filename_start+'_master_bin1.fits' )) - if g_dev['obs'].config['save_raws_to_pipe_folder_for_nightly_processing']: - g_dev['obs'].to_slow_process(200000000, ('numpy_array_save',pipefolder + '/'+tempfrontcalib + filename_start+'_master_bin1.npy',copy.deepcopy(masterDark.astype(np.uint16)))) + if g_dev['obs'].config['save_images_to_pipe_for_processing']: + if g_dev['obs'].config['pipe_save_method'] == 'local': + g_dev['obs'].to_slow_process(200000000, ('numpy_array_save',pipefolder + '/'+tempfrontcalib + filename_start+'_master_bin1.npy',copy.deepcopy(masterDark.astype(np.uint16)))) except Exception as e: plog(traceback.format_exc()) @@ -3031,17 +3033,19 @@ def regenerate_local_masters(self, requesttype): #g_dev["obs"].send_to_user("Currently regenerating local masters.") g_dev['obs'].report_to_nightlog("Started regenerating calibrations") - if g_dev['obs'].config['save_raws_to_pipe_folder_for_nightly_processing']: - try: - pipefolder = g_dev['obs'].config['pipe_archive_folder_path'] +'/calibrations/'+ g_dev['cam'].alias - if not os.path.exists(g_dev['obs'].config['pipe_archive_folder_path']+'/calibrations'): - os.makedirs(g_dev['obs'].config['pipe_archive_folder_path'] + '/calibrations') - - if not os.path.exists(g_dev['obs'].config['pipe_archive_folder_path'] +'/calibrations/'+ g_dev['cam'].alias): - os.makedirs(g_dev['obs'].config['pipe_archive_folder_path'] +'/calibrations/'+ g_dev['cam'].alias) - except: - plog("pipefolder failure") - plog(traceback.format_exc()) + if g_dev['obs'].config['save_images_to_pipe_for_processing']: + if g_dev['obs'].config['pipe_save_method'] == 'local': + + try: + pipefolder = g_dev['obs'].config['pipe_archive_folder_path'] +'/calibrations/'+ g_dev['cam'].alias + if not 
os.path.exists(g_dev['obs'].config['pipe_archive_folder_path']+'/calibrations'): + os.makedirs(g_dev['obs'].config['pipe_archive_folder_path'] + '/calibrations') + + if not os.path.exists(g_dev['obs'].config['pipe_archive_folder_path'] +'/calibrations/'+ g_dev['cam'].alias): + os.makedirs(g_dev['obs'].config['pipe_archive_folder_path'] +'/calibrations/'+ g_dev['cam'].alias) + except: + plog("pipefolder failure") + plog(traceback.format_exc()) else: pipefolder='' @@ -3208,8 +3212,9 @@ def regenerate_local_masters(self, requesttype): if g_dev['obs'].config['save_archive_versions_of_final_calibrations']: g_dev['obs'].to_slow_process(200000000, ('fits_file_save', g_dev['obs'].calib_masters_folder + 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + 'BIAS_master_bin1.fits', copy.deepcopy(masterBias.astype(np.uint16)), calibhduheader, g_dev['obs'].calib_masters_folder, 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + 'BIAS_master_bin1.fits' )) - if g_dev['obs'].config['save_raws_to_pipe_folder_for_nightly_processing']: - g_dev['obs'].to_slow_process(200000000, ('numpy_array_save',pipefolder + '/'+tempfrontcalib + 'BIAS_master_bin1.npy',copy.deepcopy(masterBias.astype(np.uint16)))) + if g_dev['obs'].config['save_images_to_pipe_for_processing']: + if g_dev['obs'].config['pipe_save_method'] == 'local': + g_dev['obs'].to_slow_process(200000000, ('numpy_array_save',pipefolder + '/'+tempfrontcalib + 'BIAS_master_bin1.npy',copy.deepcopy(masterBias.astype(np.uint16)))) except Exception as e: plog ("Could not save bias frame: ",e) @@ -3299,8 +3304,9 @@ def estimate_read_noise_chunked(bias_frames, frame_shape, gain=1.0, chunk_size=1 if g_dev['obs'].config['save_archive_versions_of_final_calibrations']: g_dev['obs'].to_slow_process(200000000, ('fits_file_save', g_dev['obs'].calib_masters_folder + 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + 'readnoise_variance_adu.fits', copy.deepcopy(variance_frame.astype('float32')), calibhduheader, g_dev['obs'].calib_masters_folder, 
'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + 'readnoise_variance_adu.fits' )) - if g_dev['obs'].config['save_raws_to_pipe_folder_for_nightly_processing']: - g_dev['obs'].to_slow_process(200000000, ('numpy_array_save', pipefolder + '/' + tempfrontcalib + 'readnoise_variance_adu.npy', copy.deepcopy(variance_frame.astype('float32'))))#, hdu.header, frame_type, g_dev["mnt"].current_icrs_ra, g_dev["mnt"].current_icrs_dec)) + if g_dev['obs'].config['save_images_to_pipe_for_processing']: + if g_dev['obs'].config['pipe_save_method'] == 'local': + g_dev['obs'].to_slow_process(200000000, ('numpy_array_save', pipefolder + '/' + tempfrontcalib + 'readnoise_variance_adu.npy', copy.deepcopy(variance_frame.astype('float32'))))#, hdu.header, frame_type, g_dev["mnt"].current_icrs_ra, g_dev["mnt"].current_icrs_dec)) except Exception as e: plog ("Could not save variance frame: ",e) @@ -3853,8 +3859,9 @@ def estimate_read_noise_chunked(bias_frames, frame_shape, gain=1.0, chunk_size=1 if g_dev['obs'].config['save_archive_versions_of_final_calibrations']: g_dev['obs'].to_slow_process(200000000, ('fits_file_save', g_dev['obs'].calib_masters_folder + 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + 'masterFlat_'+ str(filtercode) + '_bin1.fits', copy.deepcopy(temporaryFlat), calibhduheader, g_dev['obs'].calib_masters_folder, 'ARCHIVE_' + archiveDate + '_' + tempfrontcalib + 'masterFlat_'+ str(filtercode) + '_bin1.fits' )) - if g_dev['obs'].config['save_raws_to_pipe_folder_for_nightly_processing']: - g_dev['obs'].to_slow_process(200000000, ('numpy_array_save', pipefolder + '/' + tempfrontcalib + 'masterFlat_'+ str(filtercode) + '_bin1.npy', copy.deepcopy(temporaryFlat)))#, hdu.header, frame_type, g_dev["mnt"].current_icrs_ra, g_dev["mnt"].current_icrs_dec)) + if g_dev['obs'].config['save_images_to_pipe_for_processing']: + if g_dev['obs'].config['pipe_save_method'] == 'local': + g_dev['obs'].to_slow_process(200000000, ('numpy_array_save', pipefolder + '/' + tempfrontcalib + 
'masterFlat_'+ str(filtercode) + '_bin1.npy', copy.deepcopy(temporaryFlat)))#, hdu.header, frame_type, g_dev["mnt"].current_icrs_ra, g_dev["mnt"].current_icrs_dec)) except Exception as e: plog ("Could not save flat frame: ",e) @@ -3987,8 +3994,9 @@ def estimate_read_noise_chunked(bias_frames, frame_shape, gain=1.0, chunk_size=1 #filepathaws=g_dev['obs'].calib_masters_folder #filenameaws='ARCHIVE_' + archiveDate + '_' + tempfrontcalib + 'badpixelmask_bin1.fits' #g_dev['obs'].enqueue_for_calibrationUI(80, filepathaws,filenameaws) - if g_dev['obs'].config['save_raws_to_pipe_folder_for_nightly_processing']: - g_dev['obs'].to_slow_process(200000000, ('numpy_array_save', pipefolder + '/' + tempfrontcalib + 'badpixelmask_bin1.npy', copy.deepcopy( bad_pixel_mapper_array)))#, hdu.header, frame_type, g_dev["mnt"].current_icrs_ra, g_dev["mnt"].current_icrs_dec)) + if g_dev['obs'].config['save_images_to_pipe_for_processing']: + if g_dev['obs'].config['pipe_save_method'] == 'local': + g_dev['obs'].to_slow_process(200000000, ('numpy_array_save', pipefolder + '/' + tempfrontcalib + 'badpixelmask_bin1.npy', copy.deepcopy( bad_pixel_mapper_array)))#, hdu.header, frame_type, g_dev["mnt"].current_icrs_ra, g_dev["mnt"].current_icrs_dec)) try: g_dev['cam'].bpmFiles = {} g_dev['cam'].bpmFiles.update({'1': bad_pixel_mapper_array}) From b8c54d693800c0817e012bb61638ebfc7b6624c5 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Wed, 18 Jun 2025 11:29:32 +1000 Subject: [PATCH 09/30] Update sequencer.py --- devices/sequencer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/devices/sequencer.py b/devices/sequencer.py index d6c4a08d7..f6050e56d 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -3046,6 +3046,8 @@ def regenerate_local_masters(self, requesttype): except: plog("pipefolder failure") plog(traceback.format_exc()) + else: + pipefolder='' else: pipefolder='' From 076fb7bcc5068a2d562ac67da4922c93dcbb0041 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Wed, 18 Jun 
2025 11:50:37 +1000 Subject: [PATCH 10/30] upload variance and badpixel to http pipe --- devices/camera.py | 8 ++++---- devices/sequencer.py | 40 +++++++++++++++++++++++++++++++++++++++- obs.py | 15 ++++++++------- 3 files changed, 51 insertions(+), 12 deletions(-) diff --git a/devices/camera.py b/devices/camera.py index 4fe8aa89c..24df244b9 100644 --- a/devices/camera.py +++ b/devices/camera.py @@ -3968,9 +3968,9 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): os.replace(temp_path, real_path) if self.site_config['pipe_save_method'] == 'ftp': - g_dev['obs'].add_to_ftpqueue(pipetokenfolder, str(token_name)+str(suffix)+'.json') + g_dev['obs'].add_to_ftpqueue(pipetokenfolder, str(token_name)+str(suffix)+'.json', 'fromsite') if self.site_config['pipe_save_method'] == 'http': - g_dev['obs'].add_to_httpqueue(pipetokenfolder, str(token_name)+str(suffix)+'.json') + g_dev['obs'].add_to_httpqueue(pipetokenfolder, str(token_name)+str(suffix)+'.json', 'fromsite') except: plog(traceback.format_exc()) @@ -3990,9 +3990,9 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): # 2. 
Atomically replace (or create) the real file os.replace(temp_path, real_path) if self.site_config['pipe_save_method'] == 'ftp': - g_dev['obs'].add_to_ftpqueue(pipetokenfolder, token_name+'.json') + g_dev['obs'].add_to_ftpqueue(pipetokenfolder, token_name+'.json', 'fromsite') elif self.site_config['pipe_save_method'] == 'http': - g_dev['obs'].add_to_httpqueue(pipetokenfolder, token_name+'.json') + g_dev['obs'].add_to_httpqueue(pipetokenfolder, token_name+'.json', 'fromsite') except: plog(traceback.format_exc()) except: diff --git a/devices/sequencer.py b/devices/sequencer.py index f6050e56d..d0549a95d 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -2857,7 +2857,7 @@ def make_scaled_dark(self,input_folder, filename_start, masterBias, shapeImage, if g_dev['obs'].config['save_images_to_pipe_for_processing']: if g_dev['obs'].config['pipe_save_method'] == 'local': g_dev['obs'].to_slow_process(200000000, ('numpy_array_save',pipefolder + '/'+tempfrontcalib + filename_start+'_master_bin1.npy',copy.deepcopy(masterDark))) - + except Exception as e: plog(traceback.format_exc()) plog ("Could not save dark frame: ",e) @@ -4133,6 +4133,44 @@ def estimate_read_noise_chunked(bias_frames, frame_shape, gain=1.0, chunk_size=1 #g_dev["obs"].send_to_user("All calibration frames completed.") g_dev['obs'].report_to_nightlog("Finished regenerating calibrations") + + if self.config['pipe_save_method'] == 'http': + # Now we just wait a significant amount of time to make sure all the threads have stopped saving files + time.sleep(600) + # Then we send up the variance and the badpixelmap to the pipe to make files + + calib_folder = os.path.join( + g_dev['obs'].calib_masters_folder, + 'tempfrontcalib' + ) + + try: + entries = os.listdir(calib_folder) + except Exception as e: + plog(f"Could not list calibration folder '{calib_folder}': {e}") + return + + for fname in entries: + # only want .npy files + if not fname.lower().endswith('.npy'): + continue + + # name must contain one 
of the keywords + if 'variance' not in fname and 'badpixelmask' not in fname: + continue + + full_path = os.path.join(calib_folder, fname) + + # get a timestamp (file‐modification time) + try: + ts = os.path.getmtime(full_path) + except OSError: + ts = time.time() + + # enqueue: (folder, filename, upload_type, timestamp) + self.http_queue.put((calib_folder, fname, 'calibrations', ts)) + plog(f"Enqueued calibration file: {fname}") + return diff --git a/obs.py b/obs.py index c541e1356..cd45f1e5a 100644 --- a/obs.py +++ b/obs.py @@ -77,7 +77,7 @@ -def http_upload(server, filedirectory,filename): +def http_upload(server, filedirectory, filename): files = {"file": open(str(filedirectory +'/'+filename).replace('//','/'),"rb")} data = {"target_dir": "fromsite"} @@ -3229,7 +3229,7 @@ def http_process(self): queued_items = [] for fname in entries: - if not fname.endswith('.fits.fz'): + if not fname.endswith('.json', '.npy', '.fits.fz', '.fits'): continue full_path = os.path.join(ingestion_folder, fname) @@ -3248,7 +3248,7 @@ def http_process(self): ts = time.time() # Enqueue a tuple: (filedirectory, filename, timestamp) - self.http_queue.put((ingestion_folder, fname, ts)) + self.http_queue.put((ingestion_folder, fname, ts, 'from_site')) plog(f"Enqueued new HTTP file: {fname}") # ─── 2) Once scanning is done, process whatever is in http_queue ─── @@ -3256,7 +3256,7 @@ def http_process(self): # As long as there are items, keep pulling them off while True: try: - (filedirectory, filename, timestamp) = self.http_queue.get(block=False) + (filedirectory, filename, upload_type, timestamp) = self.http_queue.get(block=False) except Empty: # No more items right now; break back to top of outer loop break @@ -3270,7 +3270,8 @@ def http_process(self): # self.ftppassword, # self.ftpremotedir, filedirectory, - filename + filename, + upload_type ) if success: @@ -4577,8 +4578,8 @@ def report_to_nightlog(self, log): def add_to_ftpqueue(self, filedirectory, filename): 
self.ftp_queue.put((filedirectory, filename, time.time()), block=False) - def add_to_httpqueue(self, filedirectory, filename): - self.http_queue.put((filedirectory, filename, time.time()), block=False) + def add_to_httpqueue(self, filedirectory, filename, upload_type): + self.http_queue.put((filedirectory, filename, upload_type, time.time()), block=False) def check_platesolve_and_nudge(self, no_confirmation=True): """ From e5b91b6140fb8e33891e5d38c315a9c8753eff65 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Wed, 18 Jun 2025 12:02:51 +1000 Subject: [PATCH 11/30] Update obs.py --- obs.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/obs.py b/obs.py index cd45f1e5a..28cfbdffd 100644 --- a/obs.py +++ b/obs.py @@ -77,10 +77,10 @@ -def http_upload(server, filedirectory, filename): +def http_upload(server, filedirectory, filename, upload_type): files = {"file": open(str(filedirectory +'/'+filename).replace('//','/'),"rb")} - data = {"target_dir": "fromsite"} + data = {"target_dir": upload_type} print (files) @@ -3229,7 +3229,7 @@ def http_process(self): queued_items = [] for fname in entries: - if not fname.endswith('.json', '.npy', '.fits.fz', '.fits'): + if not fname.endswith(('.json', '.npy', '.fits.fz', '.fits')): continue full_path = os.path.join(ingestion_folder, fname) From 0b45a285c9df992239c27c0e41e5b42eec91f118 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Wed, 18 Jun 2025 14:24:42 +1000 Subject: [PATCH 12/30] don't check altitude while parked --- obs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/obs.py b/obs.py index 28cfbdffd..09593841a 100644 --- a/obs.py +++ b/obs.py @@ -2126,8 +2126,8 @@ def safety_and_monitoring_checks_loop(self): plog.err("self.enc_status not reporting correctly") if not self.mountless_operation: - # Check that the mount hasn't tracked too low or an odd slew hasn't sent it pointing to the ground. 
- if self.altitude_checks_on and not self.devices["mount"].currently_slewing: + # Check that the mount hasn't tracked too low or an odd slew hasn't sent it pointing to the ground and it isn't just parked. + if self.altitude_checks_on and not self.devices["mount"].currently_slewing and not self.devices["mount"].rapid_park_indicator: try: mount_altitude = float( self.devices["mount"].previous_status["altitude"] From 5b29904f146340c1326469f83aaddaa23ba446b6 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Wed, 18 Jun 2025 14:25:55 +1000 Subject: [PATCH 13/30] typos and configs --- configs/lcs1/obs_config.py | 2 +- devices/sequencer.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/configs/lcs1/obs_config.py b/configs/lcs1/obs_config.py index 7c3641f0a..0a0218417 100644 --- a/configs/lcs1/obs_config.py +++ b/configs/lcs1/obs_config.py @@ -57,7 +57,7 @@ 'closest_distance_to_the_moon': 3, # Degrees. For normal pointing requests don't go this close to the moon. 'minimum_distance_from_the_moon_when_taking_flats': 45, 'lowest_requestable_altitude': 15, # Degrees. For normal pointing requests don't allow requests to go this low. - 'lowest_acceptable_altitude' : -15.0, # Below this altitude, it will automatically try to home and park the scope to recover. + 'lowest_acceptable_altitude' : -20.0, # Below this altitude, it will automatically try to home and park the scope to recover. 
'degrees_to_avoid_zenith_area_for_calibrations': 0, 'degrees_to_avoid_zenith_area_in_general' : 0, 'maximum_hour_angle_requestable' : 12, diff --git a/devices/sequencer.py b/devices/sequencer.py index d0549a95d..d7b6acc56 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -4140,8 +4140,7 @@ def estimate_read_noise_chunked(bias_frames, frame_shape, gain=1.0, chunk_size=1 # Then we send up the variance and the badpixelmap to the pipe to make files calib_folder = os.path.join( - g_dev['obs'].calib_masters_folder, - 'tempfrontcalib' + g_dev['obs'].calib_masters_folder ) try: @@ -4168,7 +4167,7 @@ def estimate_read_noise_chunked(bias_frames, frame_shape, gain=1.0, chunk_size=1 ts = time.time() # enqueue: (folder, filename, upload_type, timestamp) - self.http_queue.put((calib_folder, fname, 'calibrations', ts)) + g_dev['obs'].http_queue.put((calib_folder, fname, 'calibrations', ts)) plog(f"Enqueued calibration file: {fname}") return From 37b349feeb85b613767f6d466129a95d460081ef Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Fri, 20 Jun 2025 09:32:07 +1000 Subject: [PATCH 14/30] remove variance and don't remove calibrations --- devices/sequencer.py | 5 +++-- obs.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/devices/sequencer.py b/devices/sequencer.py index 408e80d92..52a463332 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -4149,8 +4149,9 @@ def estimate_read_noise_chunked(bias_frames, frame_shape, gain=1.0, chunk_size=1 if not fname.lower().endswith('.npy'): continue - # name must contain one of the keywords - if 'variance' not in fname and 'badpixelmask' not in fname: + # name must contain one of the keywords AND start with tempfrontcalib + if ( 'variance' not in fname and 'badpixelmask' not in fname ) \ + or not fname.startswith(tempfrontcalib): continue full_path = os.path.join(calib_folder, fname) diff --git a/obs.py b/obs.py index 6e628bc46..8a0e78fd0 100644 --- a/obs.py +++ b/obs.py @@ -3356,7 +3356,7 @@ def 
http_process(self): upload_type ) - if success: + if success and not upload_type == 'calibrations': try: os.remove(filedirectory + '/' + filename) except: From 51400bdd0d7c6bb2f0ade0df34d65ae35a9d8494 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Fri, 20 Jun 2025 19:48:46 +1000 Subject: [PATCH 15/30] api base config item --- configs/lcs1/obs_config.py | 5 ++ devices/sequencer.py | 3 +- obs.py | 140 ++++++++++++++++++++----------------- 3 files changed, 84 insertions(+), 64 deletions(-) diff --git a/configs/lcs1/obs_config.py b/configs/lcs1/obs_config.py index 30c59def2..b22ac4124 100644 --- a/configs/lcs1/obs_config.py +++ b/configs/lcs1/obs_config.py @@ -32,6 +32,11 @@ 'obs_id': 'lcs1', + 'api_http_base' : 'https://api.photonranch.org/api/', + 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + 'logs_http_base' : 'https://logs.photonranch.org/logs/', + 'status_http_base' : 'https://status.photonranch.org/status/', + # Name, local and owner stuff 'name': 'Luther College Observatory, 10" Newtonian', 'airport_code': 'MEL: Melbourne Airport', diff --git a/devices/sequencer.py b/devices/sequencer.py index 52a463332..d6e3c21e4 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -102,7 +102,8 @@ def ra_fix_h(ra): def authenticated_request(method: str, uri: str, payload: dict = None) -> str: # Populate the request parameters. Include data only if it was sent. 
- base_url="https://api.photonranch.org/api" + #base_url="https://api.photonranch.org/api" + base_url=g_dev['obs'].api_http_base request_kwargs = { "method": method, "timeout" : 10, diff --git a/obs.py b/obs.py index 8a0e78fd0..8a2a9c9ce 100644 --- a/obs.py +++ b/obs.py @@ -72,7 +72,7 @@ status_forcelist=[500, 502, 503, 504]) reqs.mount("http://", HTTPAdapter(max_retries=retries)) -import ftputil +#import ftputil @@ -109,60 +109,60 @@ def http_upload(server, filedirectory, filename, upload_type): -def ftp_upload_with_ftputil( - server: str, - port: int, - username: str, - password: str, - remote_dir: str, - local_dir: str, - filenames: list[str], - use_passive: bool = True, - timeout: int = 30 -): - """ - Uses ftputil to connect and upload a list of files. - """ - # ftputil’s FTPHost automatically handles connect/login under the hood. - - try: - host = ftputil.FTPHost( - host=server, - user=username, - passwd=password, - port=port, - timeout=timeout, - ) - # Toggle passive mode if needed (default is True) - host.session.set_pasv(use_passive) - - except: - plog(traceback.format_exc()) - breakpoint() - - try: - # Ensure the remote directory exists (mkdirs=True will create nested dirs) - host.makedirs(remote_dir, exist_ok=True) - - for fname in filenames: - local_path = os.path.join(local_dir, fname) - if not os.path.isfile(local_path): - print(f"[!] Skipping {local_path} (not found).") - continue - - remote_path = host.path.join(remote_dir, fname) - print(f"Uploading {local_path} → {remote_path}…", end=" ") - # upload_if_newer only sends if local is newer or remote missing; - # you can also use host.upload(local_path, remote_path) to force. 
- host.upload_if_newer(local_path, remote_path) - print("done.") +# def ftp_upload_with_ftputil( +# server: str, +# port: int, +# username: str, +# password: str, +# remote_dir: str, +# local_dir: str, +# filenames: list[str], +# use_passive: bool = True, +# timeout: int = 30 +# ): +# """ +# Uses ftputil to connect and upload a list of files. +# """ +# # ftputil’s FTPHost automatically handles connect/login under the hood. + +# try: +# host = ftputil.FTPHost( +# host=server, +# user=username, +# passwd=password, +# port=port, +# timeout=timeout, +# ) +# # Toggle passive mode if needed (default is True) +# host.session.set_pasv(use_passive) + +# except: +# plog(traceback.format_exc()) +# breakpoint() + +# try: +# # Ensure the remote directory exists (mkdirs=True will create nested dirs) +# host.makedirs(remote_dir, exist_ok=True) + +# for fname in filenames: +# local_path = os.path.join(local_dir, fname) +# if not os.path.isfile(local_path): +# print(f"[!] Skipping {local_path} (not found).") +# continue + +# remote_path = host.path.join(remote_dir, fname) +# print(f"Uploading {local_path} → {remote_path}…", end=" ") +# # upload_if_newer only sends if local is newer or remote missing; +# # you can also use host.upload(local_path, remote_path) to force. +# host.upload_if_newer(local_path, remote_path) +# print("done.") - except Exception as e: - print("[ERROR] ftputil upload failed:", e) - finally: - host.close() +# except Exception as e: +# print("[ERROR] ftputil upload failed:", e) +# finally: +# host.close() def test_connect(host="http://google.com"): # This just tests the net connection @@ -212,7 +212,8 @@ def findProcessIdByName(processName): def authenticated_request(method: str, uri: str, payload: dict = None) -> str: # Populate the request parameters. Include data only if it was sent. 
- base_url = "https://api.photonranch.org/api" + #base_url = "https://api.photonranch.org/api" + base_url = self.api_http_base request_kwargs = { "method": method, "timeout": 10, @@ -228,7 +229,7 @@ def authenticated_request(method: str, uri: str, payload: dict = None) -> str: def send_status(obsy, column, status_to_send): """Sends an update to the status endpoint.""" - uri_status = f"https://status.photonranch.org/status/{obsy}/status/" + uri_status = self. status_http_base + f"{obsy}/status/" payload = {"statusType": str(column), "status": status_to_send} # if column == 'weather': # print("Did not send spurious weathr report.") @@ -263,6 +264,13 @@ def __init__(self, name, ptr_config): g_dev["name"] = name self.config = ptr_config + + + self.api_http_base=self.config['api_http_base'] + self.jobs_http_base=self.config['jobs_http_base'] + self.status_http_base=self.config['status_http_base'] + self.logs_http_base=self.config['logs_http_base'] + self.wema_name = self.config["wema_name"] self.wema_config = self.get_wema_config() # fetch the wema config from AWS @@ -747,7 +755,8 @@ def create_directories(base_path, camera_name, subdirectories): try: reqs.request( "POST", - "https://jobs.photonranch.org/jobs/getnewjobs", + #"https://jobs.photonranch.org/jobs/getnewjobs", + self.jobs_http_base +'getnewjobs', data=json.dumps({"site": self.name}), timeout=30, ).json() @@ -995,7 +1004,7 @@ def create_devices(self): def get_wema_config(self): """ Fetch the WEMA config from AWS """ wema_config = None - url = f"https://api.photonranch.org/api/{self.wema_name}/config/" + url = self.api_http_base + f"{self.wema_name}/config/" try: response = requests.get(url, timeout=20) wema_config = response.json()['configuration'] @@ -1091,7 +1100,8 @@ def scan_requests(self, cancel_check=False): """ self.scan_request_timer = time.time() - url_job = "https://jobs.photonranch.org/jobs/getnewjobs" + #url_job = "https://jobs.photonranch.org/jobs/getnewjobs" + url_job = self.jobs_http_base + 
"/getnewjobs" body = {"site": self.name} cmd = {} # Get a list of new jobs to complete (this request @@ -1944,7 +1954,7 @@ def safety_and_monitoring_checks_loop(self): and not self.devices["sequencer"].bias_dark_latch ): self.cancel_all_activity() - if not self.devices["mount"].rapid_park_indicator: + if not self.devices["mount"].r d_park_indicator: self.devices["mount"].park_command() self.currently_updating_FULL = False @@ -3160,7 +3170,8 @@ def sendtouser_process(self): if not self.sendtouser_queue.empty(): while not self.sendtouser_queue.empty(): (p_log, p_level) = self.sendtouser_queue.get(block=False) - url_log = "https://logs.photonranch.org/logs/newlog" + #url_log = "https://logs.photonranch.org/logs/newlog" + url_log = self.logs_http_base + "/newlog" body = json.dumps( { "site": self.config["obs_id"], @@ -4392,7 +4403,8 @@ def fast_to_aws(self): pipe_request["payload"] = payload pipe_request["sender"] = self.name - uri_status = "https://api.photonranch.org/api/pipe/enqueue" + #uri_status = "https://api.photonranch.org/api/pipe/enqueue" + uri_status = self.api_http_base + "pipe/enqueue" try: response = requests.post(uri_status,json=pipe_request, timeout=20)# allow_redirects=False, headers=close_headers) @@ -4774,7 +4786,8 @@ def get_enclosure_status_from_aws(self): """ Requests the current enclosure status from the related WEMA. """ - uri_status = f"https://status.photonranch.org/status/{self.wema_name}/enclosure/" + #uri_status = f"https://status.photonranch.org/status/{self.wema_name}/enclosure/" + uri_status = self.status_http_base + f"{self.wema_name}/enclosure/" try: aws_enclosure_status = reqs.get(uri_status, timeout=20) aws_enclosure_status = aws_enclosure_status.json() @@ -4863,7 +4876,7 @@ def get_weather_status_from_aws(self): Requests the current enclosure status from the related WEMA. 
""" - uri_status = f"https://status.photonranch.org/status/{self.wema_name}/weather/" + uri_status = self.status_http_base + f"{self.wema_name}/weather/" try: aws_weather_status = reqs.get(uri_status, timeout=20) @@ -5031,7 +5044,8 @@ def flush_command_queue(self): # jobs don't send the scope go wildly. reqs.request( "POST", - "https://jobs.photonranch.org/jobs/getnewjobs", + #"https://jobs.photonranch.org/jobs/getnewjobs", + self.jobs_http_base + 'getnewjobs', data=json.dumps({"site": self.name}), timeout=30, ).json() From 9dbcd7f1de3dbdeed757a9aea9b98db1c8ea7f0e Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Fri, 20 Jun 2025 20:05:52 +1000 Subject: [PATCH 16/30] status_http_base --- obs.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/obs.py b/obs.py index 8a2a9c9ce..82aab6e79 100644 --- a/obs.py +++ b/obs.py @@ -213,7 +213,8 @@ def findProcessIdByName(processName): def authenticated_request(method: str, uri: str, payload: dict = None) -> str: # Populate the request parameters. Include data only if it was sent. #base_url = "https://api.photonranch.org/api" - base_url = self.api_http_base + #base_url = self.api_http_base + base_url=payload['api_http_base'] request_kwargs = { "method": method, "timeout": 10, @@ -226,10 +227,10 @@ def authenticated_request(method: str, uri: str, payload: dict = None) -> str: return response.json() -def send_status(obsy, column, status_to_send): +def send_status(obsy, column, status_to_send, status_http_base): """Sends an update to the status endpoint.""" - uri_status = self. 
status_http_base + f"{obsy}/status/" + uri_status = status_http_base + f"{obsy}/status/" payload = {"statusType": str(column), "status": status_to_send} # if column == 'weather': # print("Did not send spurious weathr report.") @@ -1032,6 +1033,8 @@ def update_config(self): response = authenticated_request("PUT", uri, self.config) retryapi = False except: + plog(traceback.format_exc()) + breakpoint() plog.warn("connection glitch in update config. Waiting 5 seconds.") time.sleep(5) if "message" in response: @@ -1523,7 +1526,7 @@ def update_status(self, cancel_check=False, mount_only=False, dont_wait=False): lane = "device" if self.send_status_queue.qsize() < 7: self.send_status_queue.put( - (obsy, lane, status), block=False) + (obsy, lane, status, self.status_http_base), block=False) """ Here we update lightning system. @@ -1866,7 +1869,7 @@ def safety_and_monitoring_checks_loop(self): status["obs_settings"]["timedottime_of_last_upload"] = time.time() lane = "obs_settings" try: - send_status(self.name, lane, status) + send_status(self.name, lane, status, self.status_http_base) except: plog.warn("could not send obs_settings status") plog.warn(traceback.format_exc()) @@ -1954,7 +1957,7 @@ def safety_and_monitoring_checks_loop(self): and not self.devices["sequencer"].bias_dark_latch ): self.cancel_all_activity() - if not self.devices["mount"].r d_park_indicator: + if not self.devices["mount"].rapid_park_indicator: self.devices["mount"].park_command() self.currently_updating_FULL = False @@ -3118,7 +3121,7 @@ def send_status_process(self): #print(received_status[0], received_status[1], received_status[2]) send_status( - received_status[0], received_status[1], received_status[2]) + received_status[0], received_status[1], received_status[2], received_status[3]) self.send_status_queue.task_done() upload_time = time.time() - pre_upload self.status_interval = 2 * upload_time @@ -4818,7 +4821,7 @@ def get_enclosure_status_from_aws(self): if self.send_status_queue.qsize() < 7: 
self.send_status_queue.put( (self.name, "enclosure", - aws_enclosure_status["status"]), + aws_enclosure_status["status"], self.status_http_base), block=False, ) @@ -4931,7 +4934,7 @@ def get_weather_status_from_aws(self): # There is a size limit to the queue if self.send_status_queue.qsize() < 7: self.send_status_queue.put( - (self.name, "weather", aws_weather_status["status"]), block=False + (self.name, "weather", aws_weather_status["status"], self.status_http_base), block=False ) except Exception as e: From 4b61c963a4ba4231d2b91a257216caa5c3bbcde2 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Fri, 20 Jun 2025 20:33:43 +1000 Subject: [PATCH 17/30] api config tidyup --- obs.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/obs.py b/obs.py index 82aab6e79..87c991e5a 100644 --- a/obs.py +++ b/obs.py @@ -210,15 +210,15 @@ def findProcessIdByName(processName): return listOfProcessObjects -def authenticated_request(method: str, uri: str, payload: dict = None) -> str: +def authenticated_request(method: str, uri: str, payload: dict, base_url: str) -> str: # Populate the request parameters. Include data only if it was sent. 
#base_url = "https://api.photonranch.org/api" #base_url = self.api_http_base - base_url=payload['api_http_base'] + #base_url=api_http_base request_kwargs = { "method": method, "timeout": 10, - "url": f"{base_url}/{uri}", + "url": f"{base_url}{uri}", } if payload is not None: request_kwargs["data"] = json.dumps(payload) @@ -1030,7 +1030,7 @@ def update_config(self): retryapi = True while retryapi: try: - response = authenticated_request("PUT", uri, self.config) + response = authenticated_request("PUT", uri, self.config, self.api_http_base) retryapi = False except: plog(traceback.format_exc()) @@ -2784,7 +2784,7 @@ def ptrarchive_uploader(self, pri_image): if filepath.split(".")[-1] == "token": files = {"file": (filepath, fileobj)} aws_resp = authenticated_request( - "POST", "/upload/", {"object_name": filename} + "POST", "/upload/", {"object_name": filename},self.api_http_base ) retry = 0 while retry < 10: @@ -4444,7 +4444,8 @@ def fast_to_aws(self): request_body["s3_directory"] = "info-images" request_body["info_channel"] = info_image_channel - aws_resp = authenticated_request("POST", "/upload/", request_body) # this gets the presigned s3 upload url + aws_resp = authenticated_request("POST", "/upload/", request_body, self.api_http_base) # this gets the presigned s3 upload url + with open(filepath, "rb") as fileobj: files = {"file": (filepath, fileobj)} try: @@ -4455,6 +4456,7 @@ def fast_to_aws(self): timeout=10, ) except Exception as e: + plog.err((traceback.format_exc())) if ( "timeout" in str(e).lower() or "SSLWantWriteError" @@ -4521,7 +4523,7 @@ def calibration_to_ui(self): # Full path to file on disk filepath = pri_image[1][0] + filename aws_resp = authenticated_request( - "POST", "/upload/", {"object_name": filename} + "POST", "/upload/", {"object_name": filename}, self.api_http_base ) with open(filepath, "rb") as fileobj: files = {"file": (filepath, fileobj)} @@ -4604,7 +4606,7 @@ def medium_to_ui(self): # To the extent it has a size if 
os.stat(filepath).st_size > 0: aws_resp = authenticated_request( - "POST", "/upload/", {"object_name": filename} + "POST", "/upload/", {"object_name": filename}, self.api_http_base ) with open(filepath, "rb") as fileobj: files = {"file": (filepath, fileobj)} From 6ffcd7e35edf4b396a898b79b9c6cda631bee0fe Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Fri, 20 Jun 2025 20:35:25 +1000 Subject: [PATCH 18/30] set configs --- configs/aro1/obs_config.py | 5 +++++ configs/aro2/obs_config.py | 5 +++++ configs/eco1/obs_config.py | 5 +++++ configs/eco2/obs_config.py | 5 +++++ configs/eco3/obs_config.py | 5 +++++ configs/mrc1/obs_config.py | 5 +++++ configs/mrc2/obs_config.py | 5 +++++ 7 files changed, 35 insertions(+) diff --git a/configs/aro1/obs_config.py b/configs/aro1/obs_config.py index 6e8896062..9dbe04c82 100644 --- a/configs/aro1/obs_config.py +++ b/configs/aro1/obs_config.py @@ -70,6 +70,11 @@ 'obs_id': 'aro1', + 'api_http_base' : 'https://api.photonranch.org/api/', + 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + 'logs_http_base' : 'https://logs.photonranch.org/logs/', + 'status_http_base' : 'https://status.photonranch.org/status/', + # Name, local and owner stuff 'name': 'Apache Ridge Observatory 0m3 f4.9/9', diff --git a/configs/aro2/obs_config.py b/configs/aro2/obs_config.py index 0bb3413e1..73cf03646 100644 --- a/configs/aro2/obs_config.py +++ b/configs/aro2/obs_config.py @@ -39,6 +39,11 @@ # The unique identifier for this obs 'obs_id': 'aro2', + + 'api_http_base' : 'https://api.photonranch.org/api/', + 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + 'logs_http_base' : 'https://logs.photonranch.org/logs/', + 'status_http_base' : 'https://status.photonranch.org/status/', # Name, local and owner stuff 'name': "Apache Ridge Observatory PW 0m45 f6.8 52'X39'", 'airport_code': 'SAF', diff --git a/configs/eco1/obs_config.py b/configs/eco1/obs_config.py index a4f024d03..5860a8ba8 100644 --- a/configs/eco1/obs_config.py +++ 
b/configs/eco1/obs_config.py @@ -29,6 +29,11 @@ + 'api_http_base' : 'https://api.photonranch.org/api/', + 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + 'logs_http_base' : 'https://logs.photonranch.org/logs/', + 'status_http_base' : 'https://status.photonranch.org/status/', + # Name, local and owner stuff 'name': 'Eltham College Observatory, 0m4f6.8', 'airport_code': 'MEL: Melbourne Airport', diff --git a/configs/eco2/obs_config.py b/configs/eco2/obs_config.py index e5ab70f4a..68d8fcd09 100644 --- a/configs/eco2/obs_config.py +++ b/configs/eco2/obs_config.py @@ -26,6 +26,11 @@ + 'api_http_base' : 'https://api.photonranch.org/api/', + 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + 'logs_http_base' : 'https://logs.photonranch.org/logs/', + 'status_http_base' : 'https://status.photonranch.org/status/', + # Name, local and owner stuff 'name': 'Eltham College Observatory, 0m28', 'airport_code': 'MEL: Melbourne Airport', diff --git a/configs/eco3/obs_config.py b/configs/eco3/obs_config.py index 153a0e3ac..b0c128c77 100644 --- a/configs/eco3/obs_config.py +++ b/configs/eco3/obs_config.py @@ -28,6 +28,11 @@ 'obs_id': 'eco3', + 'api_http_base' : 'https://api.photonranch.org/api/', + 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + 'logs_http_base' : 'https://logs.photonranch.org/logs/', + 'status_http_base' : 'https://status.photonranch.org/status/', + # Name, local and owner stuff 'name': 'Eltham College Observatory, 0m4f6.8', diff --git a/configs/mrc1/obs_config.py b/configs/mrc1/obs_config.py index d60e4ae53..e1a946449 100644 --- a/configs/mrc1/obs_config.py +++ b/configs/mrc1/obs_config.py @@ -39,6 +39,11 @@ # The unique identifier for this obs 'obs_id': 'mrc1', + + 'api_http_base' : 'https://api.photonranch.org/api/', + 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + 'logs_http_base' : 'https://logs.photonranch.org/logs/', + 'status_http_base' : 'https://status.photonranch.org/status/', # Name, local and owner stuff 'name': 
'Mountain Ranch Camp Observatory 0m30 F3.8', 'airport_code': 'SBA', diff --git a/configs/mrc2/obs_config.py b/configs/mrc2/obs_config.py index 213516f9d..d3f58c0ad 100644 --- a/configs/mrc2/obs_config.py +++ b/configs/mrc2/obs_config.py @@ -69,6 +69,11 @@ 'obs_id': 'mrc2', + 'api_http_base' : 'https://api.photonranch.org/api/', + 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + 'logs_http_base' : 'https://logs.photonranch.org/logs/', + 'status_http_base' : 'https://status.photonranch.org/status/', + # Name, local and owner stuff 'name': 'Mountain Ranch Camp Observatory 0m61 f6.8', 'location': 'Santa Barbara, California, USA', From 752f88a8b8ac47e397d5e91f841bff209bfb8fd3 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Sat, 21 Jun 2025 06:01:37 +1000 Subject: [PATCH 19/30] lcs config edit --- configs/lcs1/obs_config.py | 13 +++++++++---- obs.py | 18 +++++++++--------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/configs/lcs1/obs_config.py b/configs/lcs1/obs_config.py index b22ac4124..bafdafb36 100644 --- a/configs/lcs1/obs_config.py +++ b/configs/lcs1/obs_config.py @@ -32,10 +32,15 @@ 'obs_id': 'lcs1', - 'api_http_base' : 'https://api.photonranch.org/api/', - 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', - 'logs_http_base' : 'https://logs.photonranch.org/logs/', - 'status_http_base' : 'https://status.photonranch.org/status/', + # 'api_http_base' : 'https://api.photonranch.org/api/', + # 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + # 'logs_http_base' : 'https://logs.photonranch.org/logs/', + # 'status_http_base' : 'https://status.photonranch.org/status/', + + 'api_http_base' : 'https://hub.nextastro.org/api/', + 'jobs_http_base' : 'https://hub.nextastro.org/jobs/', + 'logs_http_base' : 'https://hub.nextastro.org/logs/', + 'status_http_base' : 'https://hub.nextastro.org/status/', # Name, local and owner stuff 'name': 'Luther College Observatory, 10" Newtonian', diff --git a/obs.py b/obs.py index 87c991e5a..fe4d0047e 
100644 --- a/obs.py +++ b/obs.py @@ -3286,15 +3286,15 @@ def ftp_process(self): try: print ("TRYING FTP") - ftp_upload_with_ftputil( - self.fitserver, - self.ftpport, - self.ftpusername, - self.ftppassword, - self.ftpremotedir, - filedirectory, - filename - ) + # ftp_upload_with_ftputil( + # self.fitserver, + # self.ftpport, + # self.ftpusername, + # self.ftppassword, + # self.ftpremotedir, + # filedirectory, + # filename + # ) except Exception: plog("Night Log did not write, usually not fatal.") plog(traceback.format_exc()) From 9e6bb7a159790cf16cbd4bf41c2ef647054fa61f Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Sat, 21 Jun 2025 07:50:06 +1000 Subject: [PATCH 20/30] api options updated --- configs/lcs1/obs_config.py | 10 +- obs.py | 200 ++++++++++++++++++++++++++----------- 2 files changed, 152 insertions(+), 58 deletions(-) diff --git a/configs/lcs1/obs_config.py b/configs/lcs1/obs_config.py index bafdafb36..78cc8d539 100644 --- a/configs/lcs1/obs_config.py +++ b/configs/lcs1/obs_config.py @@ -37,11 +37,17 @@ # 'logs_http_base' : 'https://logs.photonranch.org/logs/', # 'status_http_base' : 'https://status.photonranch.org/status/', - 'api_http_base' : 'https://hub.nextastro.org/api/', - 'jobs_http_base' : 'https://hub.nextastro.org/jobs/', + # 'api_http_base' : 'https://hub.nextastro.org/api/', + # 'jobs_http_base' : 'https://hub.nextastro.org/jobs/', + # 'logs_http_base' : 'https://hub.nextastro.org/logs/', + # 'status_http_base' : 'https://hub.nextastro.org/status/', + + 'api_http_base' : 'https://hub.nextastro.org/', + 'jobs_http_base' : 'https://hub.nextastro.org/jobs', 'logs_http_base' : 'https://hub.nextastro.org/logs/', 'status_http_base' : 'https://hub.nextastro.org/status/', + # Name, local and owner stuff 'name': 'Luther College Observatory, 10" Newtonian', 'airport_code': 'MEL: Melbourne Airport', diff --git a/obs.py b/obs.py index fe4d0047e..54575c20a 100644 --- a/obs.py +++ b/obs.py @@ -1002,20 +1002,44 @@ def create_devices(self): print("--- 
Finished Initializing Devices ---\n") + # def get_wema_config(self): + # """ Fetch the WEMA config from AWS """ + # wema_config = None + # url = self.api_http_base + f"{self.wema_name}/config/" + # try: + # response = requests.get(url, timeout=20) + # wema_config = response.json()['configuration'] + # wema_last_recorded_day_dir = wema_config['events'].get('day_directory', '') + # plog(f"Retrieved wema config, lastest version is from day_directory {wema_last_recorded_day_dir}") + # except Exception as e: + # plog(traceback.format_exc()) + # breakpoint() + # plog.warn("WARNING: failed to get wema config!", e) + # return wema_config + def get_wema_config(self): """ Fetch the WEMA config from AWS """ wema_config = None url = self.api_http_base + f"{self.wema_name}/config/" try: response = requests.get(url, timeout=20) - wema_config = response.json()['configuration'] - wema_last_recorded_day_dir = wema_config['events'].get('day_directory', '') - plog(f"Retrieved wema config, lastest version is from day_directory {wema_last_recorded_day_dir}") + data = response.json() + # if the top‐level JSON has a 'wema_name' key, use the whole dict, + # otherwise pull out the 'configuration' sub-dict + if 'wema_name' in data: + wema_config = data + else: + wema_config = data.get('configuration', {}) + wema_last_recorded_day_dir = wema_config.get('events', {}).get('day_directory', '') + plog(f"Retrieved wema config, latest version is from day_directory {wema_last_recorded_day_dir}") except Exception as e: + plog(traceback.format_exc()) + breakpoint() plog.warn("WARNING: failed to get wema config!", e) return wema_config + def update_config(self): """Sends the config to AWS.""" @@ -3174,7 +3198,7 @@ def sendtouser_process(self): while not self.sendtouser_queue.empty(): (p_log, p_level) = self.sendtouser_queue.get(block=False) #url_log = "https://logs.photonranch.org/logs/newlog" - url_log = self.logs_http_base + "/newlog" + url_log = self.logs_http_base + "newlog" body = json.dumps( { 
"site": self.config["obs_id"], @@ -4794,48 +4818,89 @@ def get_enclosure_status_from_aws(self): #uri_status = f"https://status.photonranch.org/status/{self.wema_name}/enclosure/" uri_status = self.status_http_base + f"{self.wema_name}/enclosure/" try: - aws_enclosure_status = reqs.get(uri_status, timeout=20) - aws_enclosure_status = aws_enclosure_status.json() + # aws_enclosure_status = reqs.get(uri_status, timeout=20) + # aws_enclosure_status = aws_enclosure_status.json() + # aws_enclosure_status["site"] = self.name + + # for enclosurekey in aws_enclosure_status["status"]["enclosure"][ + # "enclosure1" + # ].keys(): + # aws_enclosure_status["status"]["enclosure"]["enclosure1"][ + # enclosurekey + # ] = aws_enclosure_status["status"]["enclosure"]["enclosure1"][ + # enclosurekey + # ][ + # "val" + # ] + + # if self.assume_roof_open: + # aws_enclosure_status["status"]["enclosure"]["enclosure1"][ + # "shutter_status" + # ] = "Sim. Open" + # aws_enclosure_status["status"]["enclosure"]["enclosure1"][ + # "enclosure_mode" + # ] = "Simulated" + + # try: + # # To stop status's filling up the queue under poor connection conditions + # # There is a size limit to the queue + # if self.send_status_queue.qsize() < 7: + # self.send_status_queue.put( + # (self.name, "enclosure", + # aws_enclosure_status["status"], self.status_http_base), + # block=False, + # ) + + # except Exception as e: + # plog.err("aws enclosure send failed ", e) + + # aws_enclosure_status = aws_enclosure_status["status"]["enclosure"][ + # "enclosure1" + # ] + + aws_resp = reqs.get(uri_status, timeout=20) + aws_enclosure_status = aws_resp.json() aws_enclosure_status["site"] = self.name - - for enclosurekey in aws_enclosure_status["status"]["enclosure"][ - "enclosure1" - ].keys(): - aws_enclosure_status["status"]["enclosure"]["enclosure1"][ - enclosurekey - ] = aws_enclosure_status["status"]["enclosure"]["enclosure1"][ - enclosurekey - ][ - "val" - ] - + + # drill into enclosure1 + enc1 = aws_enclosure_status 
\ + .get("status", {}) \ + .get("enclosure", {}) \ + .get("enclosure1", {}) + + # only replace v with v["val"] when v is a dict that has "val" + for key, v in list(enc1.items()): + if isinstance(v, dict) and "val" in v: + enc1[key] = v["val"] + # else: leave enc1[key] alone (it's already a primitive) + + # apply your simulated‐open override if self.assume_roof_open: - aws_enclosure_status["status"]["enclosure"]["enclosure1"][ - "shutter_status" - ] = "Sim. Open" - aws_enclosure_status["status"]["enclosure"]["enclosure1"][ - "enclosure_mode" - ] = "Simulated" - + enc1["shutter_status"] = "Sim. Open" + enc1["enclosure_mode"] = "Simulated" + + # now push only the cleaned status dict onto your queue try: - # To stop status's filling up the queue under poor connection conditions - # There is a size limit to the queue if self.send_status_queue.qsize() < 7: self.send_status_queue.put( - (self.name, "enclosure", - aws_enclosure_status["status"], self.status_http_base), + (self.name, "enclosure", aws_enclosure_status["status"], self.status_http_base), block=False, ) - except Exception as e: plog.err("aws enclosure send failed ", e) + + # finally reassign so downstream code sees just the enclosure1 dict + aws_enclosure_status = enc1 + - aws_enclosure_status = aws_enclosure_status["status"]["enclosure"][ - "enclosure1" - ] except Exception as e: plog.err("Failed to get aws enclosure status. 
Usually not fatal: ", e) + + plog.err(traceback.format_exc()) + plog.err("Failed rebooting, needs to be debugged") + breakpoint() + try: if self.devices["sequencer"].last_roof_status == "Closed" and aws_enclosure_status[ @@ -4898,31 +4963,54 @@ def get_weather_status_from_aws(self): ] = None try: - if ( - aws_weather_status["status"]["observing_conditions"][ - "observing_conditions1" - ] - == None - ): - aws_weather_status["status"]["observing_conditions"][ - "observing_conditions1" - ] = {"wx_ok": "Unknown"} + # if ( + # aws_weather_status["status"]["observing_conditions"][ + # "observing_conditions1" + # ] + # == None + # ): + # aws_weather_status["status"]["observing_conditions"][ + # "observing_conditions1" + # ] = {"wx_ok": "Unknown"} + # else: + # for weatherkey in aws_weather_status["status"]["observing_conditions"][ + # "observing_conditions1" + # ].keys(): + # aws_weather_status["status"]["observing_conditions"][ + # "observing_conditions1" + # ][weatherkey] = aws_weather_status["status"][ + # "observing_conditions" + # ][ + # "observing_conditions1" + # ][ + # weatherkey + # ][ + # "val" + # ] + + obs = aws_weather_status["status"]["observing_conditions"]["observing_conditions1"] + + #print (obs) + + # If it ever comes back None, give it a placeholder + if obs is None: + aws_weather_status["status"]["observing_conditions"]["observing_conditions1"] = {"wx_ok": "Unknown"} else: - for weatherkey in aws_weather_status["status"]["observing_conditions"][ - "observing_conditions1" - ].keys(): - aws_weather_status["status"]["observing_conditions"][ - "observing_conditions1" - ][weatherkey] = aws_weather_status["status"][ - "observing_conditions" - ][ - "observing_conditions1" - ][ - weatherkey - ][ - "val" - ] + # Only extract .val if it's really there + for key, val in list(obs.items()): + if isinstance(val, dict) and "val" in val: + obs[key] = val["val"] + # else: keep val untouched + #print (aws_weather_status) + # return aws_weather_status + + except: + + 
plog.err(traceback.format_exc()) + plog.err("Failed rebooting, needs to be debugged") + breakpoint() + plog.warn("bit of a glitch in weather status") aws_weather_status = {} aws_weather_status["status"] = {} From 93b7d056619769c563e9bd41e97fa8038cd60a3c Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Sat, 21 Jun 2025 19:39:28 +1000 Subject: [PATCH 21/30] files flying everywhere --- configs/lcs1/obs_config.py | 12 ++++++------ obs.py | 38 +++++++++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 15 deletions(-) diff --git a/configs/lcs1/obs_config.py b/configs/lcs1/obs_config.py index 78cc8d539..2abfb4290 100644 --- a/configs/lcs1/obs_config.py +++ b/configs/lcs1/obs_config.py @@ -220,7 +220,7 @@ 'rotator': 'rotator', 'selector': None, 'filter_wheel': 'RGGB', - 'camera': 'camera_1_1', + 'camera': 'lcy1qhy268c', 'sequencer': 'sequencer1' }, @@ -237,7 +237,7 @@ 'main_fw': None, # Cameras - 'main_cam': 'camera_1_1', + 'main_cam': 'lcy1qhy268c', 'guide_cam': None, 'widefield_cam': None, 'allsky_cam': None, @@ -358,12 +358,12 @@ 'rotator_name': 'rotator', 'has_instrument_selector': False, #This is a default for a single instrument system 'selector_positions': 1, #Note starts with 1 - 'instrument names': ['camera_1_1'], + 'instrument names': ['lcy1qhy268c'], 'instrument aliases': ['ASI071MCPro'], 'configuration': { "position1": ["darkslide1", "RGGB", "camera_1_1"] }, - 'camera_name': 'camera_1_1', + 'camera_name': 'lcy1qhy268c', #'filter_wheel_name': 'RGGB', 'filter_wheel_name': None, 'has_fans': False, @@ -457,7 +457,7 @@ 'shutdown_script': None, 'ports': 1, 'instruments': ['Main_camera'], #, 'eShel_spect', 'planet_camera', 'UVEX_spect'], - 'cameras': ['camera_1_1'], # , 'camera_1_2', None, 'camera_1_4'], + 'cameras': ['lcy1qhy268c'], # , 'camera_1_2', None, 'camera_1_4'], 'guiders': [None], # , 'guider_1_2', None, 'guide_1_4'], 'default': 0 }, @@ -497,7 +497,7 @@ 'camera': { - 'camera_1_1': { + 'lcy1qhy268c': { 'parent': 'Main OTA', 'name': 'lcy1qhy268c', 
#Important because this points to a server file structure by that name. 'desc': 'QHY 268C Pro', diff --git a/obs.py b/obs.py index 54575c20a..f5eda11c3 100644 --- a/obs.py +++ b/obs.py @@ -99,6 +99,7 @@ def http_upload(server, filedirectory, filename, upload_type): else: # fallback to raw text so you can see the error print(resp.text) + breakpoint() return True except: plog(traceback.format_exc()) @@ -1075,9 +1076,13 @@ def update_config(self): elif "ResponseMetadata" in response: if response["ResponseMetadata"]["HTTPStatusCode"] == 200: plog("Config uploaded successfully.") + + elif "status" in response: + if response['status'] == 'updated': + plog("Config uploaded successfully.") else: - plog("Response to obsid config upload unclear. Here is the response") - plog(response) + plog ("eHHH?") + plog (response) else: plog("Response to obsid config upload unclear. Here is the response") plog(response) @@ -1141,9 +1146,13 @@ def scan_requests(self, cancel_check=False): plog.warn("problem gathering scan requests. Likely just a connection glitch.") unread_commands = [] + # Make sure the list is sorted in the order the jobs were issued # Note: the ulid for a job is a unique lexicographically-sortable id. 
if len(unread_commands) > 0: + print (url_job) + print (body) + print (unread_commands) # As the stop_all_activity script flushes the commands # Any new commands imply there has been a new command since @@ -3368,7 +3377,7 @@ def http_process(self): ts = time.time() # Enqueue a tuple: (filedirectory, filename, timestamp) - self.http_queue.put((ingestion_folder, fname, ts, 'from_site')) + self.http_queue.put((ingestion_folder, fname, 'fromsite', ts )) plog(f"Enqueued new HTTP file: {fname}") # ─── 2) Once scanning is done, process whatever is in http_queue ─── @@ -3382,6 +3391,7 @@ def http_process(self): break try: + print (upload_type) print ("TRYING HTTP") success=http_upload( self.fitserver, @@ -4473,12 +4483,22 @@ def fast_to_aws(self): with open(filepath, "rb") as fileobj: files = {"file": (filepath, fileobj)} try: - reqs.post( - aws_resp["url"], - data=aws_resp["fields"], - files=files, - timeout=10, - ) + + #breakpoint() + try: + reqs.post( + aws_resp["url"], + data=aws_resp["fields"], + files=files, + timeout=10, + ) + except: + reqs.post( + aws_resp["url"], + #data=aws_resp["fields"], + files=files, + timeout=10, + ) except Exception as e: plog.err((traceback.format_exc())) if ( From ebedb11e2c14dba41e5d2b5bca2e92d58adf79ae Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Mon, 23 Jun 2025 18:02:04 +1000 Subject: [PATCH 22/30] mount name --- configs/lcs1/obs_config.py | 2 +- obs.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/configs/lcs1/obs_config.py b/configs/lcs1/obs_config.py index 2abfb4290..0268c9bd8 100644 --- a/configs/lcs1/obs_config.py +++ b/configs/lcs1/obs_config.py @@ -268,7 +268,7 @@ 'mount': { - 'lcy10inch': { + 'lcs1_pier': { 'parent': 'enclosure1', 'tel_id': '10inch', 'name': 'lcy10inch', diff --git a/obs.py b/obs.py index f5eda11c3..f45cee6b1 100644 --- a/obs.py +++ b/obs.py @@ -4917,9 +4917,9 @@ def get_enclosure_status_from_aws(self): except Exception as e: plog.err("Failed to get aws enclosure status. 
Usually not fatal: ", e) - plog.err(traceback.format_exc()) - plog.err("Failed rebooting, needs to be debugged") - breakpoint() + # plog.err(traceback.format_exc()) + # plog.err("Failed rebooting, needs to be debugged") + # breakpoint() try: From b0e06a0bece954e588bd10b35de2702d8ba450f7 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Wed, 25 Jun 2025 06:16:07 +1000 Subject: [PATCH 23/30] typo --- devices/camera.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/devices/camera.py b/devices/camera.py index 1740eb233..89980a748 100644 --- a/devices/camera.py +++ b/devices/camera.py @@ -3937,8 +3937,9 @@ def write_out_realtimefiles_token_to_disk(self, token_name, real_time_files): if self.is_osc: suffixes = ['B1', 'R1', 'G1', 'G2', 'CV'] - temp_file_holder=[] + for suffix in suffixes: + temp_file_holder=[] for tempfilename in real_time_files: temp_file_holder.append(tempfilename.replace('-EX00.', f'{suffix}-EX00.')) try: From 73c296ecc099f92296a61218be56ad9dab7d4282 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Mon, 30 Jun 2025 17:46:31 +1000 Subject: [PATCH 24/30] config updates --- configs/aro1/obs_config.py | 6 ++++++ configs/aro2/obs_config.py | 6 ++++++ configs/eco1/obs_config.py | 18 +++++++++++++----- configs/eco2/obs_config.py | 15 +++++++++++---- configs/eco3/obs_config.py | 6 ++++++ configs/mrc1/obs_config.py | 6 ++++++ configs/mrc2/obs_config.py | 6 ++++++ configs/tbo2/obs_config.py | 6 ++++++ 8 files changed, 60 insertions(+), 9 deletions(-) diff --git a/configs/aro1/obs_config.py b/configs/aro1/obs_config.py index 9dbe04c82..77672e64c 100644 --- a/configs/aro1/obs_config.py +++ b/configs/aro1/obs_config.py @@ -203,6 +203,12 @@ 'push_file_list_to_pipe_queue': False, + # LINKS TO PIPE FOLDER + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'local', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + 
'http_ingestion_folder': 'C:/http_ingestion/', # The site can fully platesolve each image before it is sent off to s3 or a PIPE # If there are spare enough cycles at the site, this saves time for the PIPE diff --git a/configs/aro2/obs_config.py b/configs/aro2/obs_config.py index 73cf03646..473edd30a 100644 --- a/configs/aro2/obs_config.py +++ b/configs/aro2/obs_config.py @@ -185,6 +185,12 @@ 'push_file_list_to_pipe_queue': False, + # LINKS TO PIPE FOLDER + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'local', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + 'http_ingestion_folder': 'C:/http_ingestion/', # The site can fully platesolve each image before it is sent off to s3 or a PIPE # If there are spare enough cycles at the site, this saves time for the PIPE diff --git a/configs/eco1/obs_config.py b/configs/eco1/obs_config.py index 5860a8ba8..402e79773 100644 --- a/configs/eco1/obs_config.py +++ b/configs/eco1/obs_config.py @@ -130,11 +130,19 @@ 'push_file_list_to_pipe_queue': False, - - # Bisque mounts can't run updates in a thread ... yet... until I figure it out, - # So this is False for Bisques and true for everyone else. - 'run_main_update_in_a_thread': True, - 'run_status_update_in_a_thread' : True, + + # LINKS TO PIPE FOLDER + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'local', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + 'http_ingestion_folder': 'C:/http_ingestion/', + + + # # Bisque mounts can't run updates in a thread ... yet... until I figure it out, + # # So this is False for Bisques and true for everyone else. + # 'run_main_update_in_a_thread': True, + # 'run_status_update_in_a_thread' : True, # Minimum realistic seeing at the site. 
diff --git a/configs/eco2/obs_config.py b/configs/eco2/obs_config.py index 68d8fcd09..64c1fa18b 100644 --- a/configs/eco2/obs_config.py +++ b/configs/eco2/obs_config.py @@ -128,11 +128,18 @@ 'push_file_list_to_pipe_queue': False, + + # LINKS TO PIPE FOLDER + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'local', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + 'http_ingestion_folder': 'C:/http_ingestion/', - # Bisque mounts can't run updates in a thread ... yet... until I figure it out, - # So this is False for Bisques and true for everyone else. - 'run_main_update_in_a_thread': False, - 'run_status_update_in_a_thread' : True, + # # Bisque mounts can't run updates in a thread ... yet... until I figure it out, + # # So this is False for Bisques and true for everyone else. + # 'run_main_update_in_a_thread': False, + # 'run_status_update_in_a_thread' : True, # Minimum realistic seeing at the site. # This allows culling of unphysical results in photometry and other things diff --git a/configs/eco3/obs_config.py b/configs/eco3/obs_config.py index b0c128c77..20d583179 100644 --- a/configs/eco3/obs_config.py +++ b/configs/eco3/obs_config.py @@ -129,6 +129,12 @@ 'push_file_list_to_pipe_queue': False, + # LINKS TO PIPE FOLDER + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'local', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + 'http_ingestion_folder': 'C:/http_ingestion/', # Bisque mounts can't run updates in a thread ... yet... until I figure it out, # So this is False for Bisques and true for everyone else. 
diff --git a/configs/mrc1/obs_config.py b/configs/mrc1/obs_config.py index e1a946449..0fde39db0 100644 --- a/configs/mrc1/obs_config.py +++ b/configs/mrc1/obs_config.py @@ -180,6 +180,12 @@ 'push_file_list_to_pipe_queue': False, + # LINKS TO PIPE FOLDER + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'local', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + 'http_ingestion_folder': 'C:/http_ingestion/', # Bisque mounts can't run updates in a thread ... yet... until I figure it out, # So this is False for Bisques and true for everyone else. diff --git a/configs/mrc2/obs_config.py b/configs/mrc2/obs_config.py index d3f58c0ad..8c4d10707 100644 --- a/configs/mrc2/obs_config.py +++ b/configs/mrc2/obs_config.py @@ -218,6 +218,12 @@ 'solve_timer': 0.05, # Only solve every X minutes 'threshold_mount_update': 45, # only update mount when X arcseconds away 'push_file_list_to_pipe_queue': False, + # LINKS TO PIPE FOLDER + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'local', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 'C:/ftp_ingestion/', + 'http_ingestion_folder': 'C:/http_ingestion/', # The site can fully platesolve each image before it is sent off to s3 or a PIPE # If there are spare enough cycles at the site, this saves time for the PIPE diff --git a/configs/tbo2/obs_config.py b/configs/tbo2/obs_config.py index 7325f44b3..bc570bc12 100644 --- a/configs/tbo2/obs_config.py +++ b/configs/tbo2/obs_config.py @@ -166,6 +166,12 @@ 'fully_platesolve_images_at_site_rather_than_pipe' : True, 'push_file_list_to_pipe_queue': False, + # LINKS TO PIPE FOLDER + 'save_images_to_pipe_for_processing': True, + 'pipe_save_method': 'local', # Can also be 'ftp' or 'http' for that transfer but also 'local' pipe for a local LAN pipe server + + 'ftp_ingestion_folder': 
'C:/ftp_ingestion/', + 'http_ingestion_folder': 'C:/http_ingestion/', From b5d190576a3b9f1ebeaf84c4c1b6bcbe49701c1a Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Mon, 30 Jun 2025 17:49:00 +1000 Subject: [PATCH 25/30] configs #2 --- configs/eco1/obs_config.py | 13 +++++++++---- configs/eco2/obs_config.py | 13 +++++++++---- configs/eco3/obs_config.py | 13 +++++++++---- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/configs/eco1/obs_config.py b/configs/eco1/obs_config.py index 402e79773..f436d22b4 100644 --- a/configs/eco1/obs_config.py +++ b/configs/eco1/obs_config.py @@ -29,10 +29,15 @@ - 'api_http_base' : 'https://api.photonranch.org/api/', - 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', - 'logs_http_base' : 'https://logs.photonranch.org/logs/', - 'status_http_base' : 'https://status.photonranch.org/status/', + # 'api_http_base' : 'https://api.photonranch.org/api/', + # 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + # 'logs_http_base' : 'https://logs.photonranch.org/logs/', + # 'status_http_base' : 'https://status.photonranch.org/status/', + + 'api_http_base' : 'https://hub.nextastro.org/', + 'jobs_http_base' : 'https://hub.nextastro.org/jobs', + 'logs_http_base' : 'https://hub.nextastro.org/logs/', + 'status_http_base' : 'https://hub.nextastro.org/status/', # Name, local and owner stuff 'name': 'Eltham College Observatory, 0m4f6.8', diff --git a/configs/eco2/obs_config.py b/configs/eco2/obs_config.py index 64c1fa18b..119305c5d 100644 --- a/configs/eco2/obs_config.py +++ b/configs/eco2/obs_config.py @@ -26,10 +26,15 @@ - 'api_http_base' : 'https://api.photonranch.org/api/', - 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', - 'logs_http_base' : 'https://logs.photonranch.org/logs/', - 'status_http_base' : 'https://status.photonranch.org/status/', + # 'api_http_base' : 'https://api.photonranch.org/api/', + # 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + # 'logs_http_base' : 
'https://logs.photonranch.org/logs/', + # 'status_http_base' : 'https://status.photonranch.org/status/', + + 'api_http_base' : 'https://hub.nextastro.org/', + 'jobs_http_base' : 'https://hub.nextastro.org/jobs', + 'logs_http_base' : 'https://hub.nextastro.org/logs/', + 'status_http_base' : 'https://hub.nextastro.org/status/', # Name, local and owner stuff 'name': 'Eltham College Observatory, 0m28', diff --git a/configs/eco3/obs_config.py b/configs/eco3/obs_config.py index 20d583179..40b72ea3b 100644 --- a/configs/eco3/obs_config.py +++ b/configs/eco3/obs_config.py @@ -28,12 +28,17 @@ 'obs_id': 'eco3', - 'api_http_base' : 'https://api.photonranch.org/api/', - 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', - 'logs_http_base' : 'https://logs.photonranch.org/logs/', - 'status_http_base' : 'https://status.photonranch.org/status/', + # 'api_http_base' : 'https://api.photonranch.org/api/', + # 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', + # 'logs_http_base' : 'https://logs.photonranch.org/logs/', + # 'status_http_base' : 'https://status.photonranch.org/status/', + 'api_http_base' : 'https://hub.nextastro.org/', + 'jobs_http_base' : 'https://hub.nextastro.org/jobs', + 'logs_http_base' : 'https://hub.nextastro.org/logs/', + 'status_http_base' : 'https://hub.nextastro.org/status/', + # Name, local and owner stuff 'name': 'Eltham College Observatory, 0m4f6.8', 'airport_code': 'MEL: Melbourne Airport', From 046f37c7070e3c6cb9acf6bbe9e8aa0bcfadc697 Mon Sep 17 00:00:00 2001 From: Michael Fitzgerald Date: Thu, 3 Jul 2025 12:12:09 +1000 Subject: [PATCH 26/30] include maxim camera control and bggs1 --- configs/bggs1/obs_config.py | 1080 +++++++++++++++++++++++++++++++++++ devices/camera.py | 287 ++++++++-- devices/sequencer.py | 18 +- 3 files changed, 1314 insertions(+), 71 deletions(-) create mode 100644 configs/bggs1/obs_config.py diff --git a/configs/bggs1/obs_config.py b/configs/bggs1/obs_config.py new file mode 100644 index 000000000..83b059301 --- 
/dev/null +++ b/configs/bggs1/obs_config.py @@ -0,0 +1,1080 @@ +# -*- coding: utf-8 -*- +''' +obs_config.py obs_config.py obs_config.py obs_config.py obs_config.py +Created on Fri Feb 07, 11:57:41 2020 +Updated 20220914 WER This version does not support color camera channel. +Updates 20231102 WER This is meant to clean up and refactor wema/obsp +architecture + +@author: wrosing + +NB NB NB If we have one config file then paths need to change depending upon +hich host does what job. + +aro-0m30 10.0.0.73 +aro-wema 10.0.0.50 +Power Control 10.0.0.100 admin arot****** +Roof Control 10.0.0.200 admin arot****** /setup.html for setup. +Redis 10.0.0.174:6379 ; rds = redis.Redis(host='10.0.0.174', + port=6379); rds.set('wx', 'bogus'); rds.get('wx').decode() +Dragonfly Obsolete. + +Hubble V1 00:41:27.30 +41:10:10.4 +''' + +''' + Example : at 0.6 µm, at the F/D 6 focus of an instrument, the focusing tolerance which leads to a focusing \ + precision better than l/8 is 8*62*0.0006*(1/8) = 0.02 mm, ie ± 20 microns. + + F/d Tolerance + ± mm + + 2 0.0025 + + 3 0.005 + + 4 0.01 + + 5 0.015 + + 6 0.02 + + 8 0.04 + + 10 0.06 + + 12 0.09 + + 15 0.13 + + 20 0.24 + + 30 0.54 +''' + +# 1 1 1 +# 2 3 4 5 6 7 8 9 0 1 2 +#23456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +import json + + +obs_id = 'bggs1' + +site_config = { + # Instance type specifies whether this is an obs or a wema + 'instance_type': 'obs', + # If this is not a wema, this specifies the wema that this obs is connected to + 'wema_name': 'bggs', + # The unique identifier for this obs + 'obs_id': 'bggs1', + + + # Name, local and owner stuff + 'name': 'Apache Ridge Observatory 0m3 f4.9/9', + + + 'site_is_public' : False, + + 'location': 'Santa Fe, New Mexico, USA', + # This is meant to be an optional informatinal website associated with the observatory. 
+ 'telescope_description': 'CV 0m30 f4.9', + 'observatory_url': 'https://starz-r-us.sky/clearskies2', + 'observatory_logo': None, # + 'mpc_code': 'ZZ23', # This is made up for now. + 'dedication': ''' + Now is the time for all good persons + to get out and vote, lest we lose + charge of our democracy. + ''', # i.e, a multi-line text block supplied and formatted by the owner. + 'owner': ['google-oauth2|102124071738955888216', \ + 'google-oauth2|112401903840371673242'], # Neyle, + 'owner_alias': ['ANS', 'WER'], + 'admin_aliases': ["ANS", "WER", "TELOPS", "TB"], + + + "platesolve_timeout": 60, # Default should be about 45 seconds, but slower computers will take longer + # Default safety settings + 'safety_check_period': 120, # MF's original setting was 45. + + # Degrees. For normal pointing requests don't go this close to the sun. + 'closest_distance_to_the_sun': 30, + # Degrees. For normal pointing requests don't go this close to the moon. + 'closest_distance_to_the_moon': 5, + 'minimum_distance_from_the_moon_when_taking_flats': 30, + # Degrees. For normal pointing requests don't allow requests to go this low. + 'lowest_requestable_altitude': 15, + # Below this altitude, it will automatically try to home and park the scope to recover. + 'lowest_acceptable_altitude': -10, + 'degrees_to_avoid_zenith_area_for_calibrations': 0, + 'degrees_to_avoid_zenith_area_in_general': 0, + 'maximum_hour_angle_requestable': 9, + # NB NB WER ARO Obs has a chiller + 'temperature_at_which_obs_too_hot_for_camera_cooling': 32, + + + # These are the default values that will be set for the obs + # on a reboot of obs.py. They are safety checks that + # can be toggled by an admin in the Observe tab. 
+ + # # Engineering start + + # 'scope_in_manual_mode': True, + # 'scope_in_engineering_mode': True, + # 'mount_reference_model_off': True, + # 'sun_checks_on': False, + # 'moon_checks_on': False, + # 'altitude_checks_on': False, + # 'daytime_exposure_time_safety_on': False, + # 'simulate_open_roof': True, + # 'auto_centering_off': True, + # 'self_guide_on': False, + # 'always_do_a_centering_exposure_regardless_of_nearby_reference': False, #this is a qustionable setting + # 'owner_only_commands':True, + + # #SAFESTART + + 'scope_in_manual_mode': False, + 'scope_in_engineering_mode': False, + 'mount_reference_model_off': False, + 'sun_checks_on': True, + 'moon_checks_on': True, + 'altitude_checks_on': True, + 'daytime_exposure_time_safety_on': True, #Perhaps condition by roof open/closed? + 'simulate_open_roof': False, + 'auto_centering_off': False, + 'self_guide_on': True, + 'always_do_a_centering_exposure_regardless_of_nearby_reference': True, + 'owner_only_commands': False, + + + # Setup of folders on local and network drives. + 'ingest_raws_directly_to_archive': False, # This it the OCS-archive, archive-photonranch.org + # LINKS TO PIPE FOLDER + 'save_raws_to_pipe_folder_for_nightly_processing': False, + # WER changed Z to X 20231113 @1:16 UTC + 'pipe_archive_folder_path': 'X:/localptrarchive/', +# These are options to minimise diskspace for calibrations + 'produce_fits_file_for_final_calibrations': True, + 'save_archive_versions_of_final_calibrations' : False, + + # 'temporary_local_pipe_archive_to_hold_files_while_copying' : 'F:/tempfolderforpipeline', + # LINKS FOR OBS FOLDERS + 'client_hostname': "ARO-0m30", # Generic place for this host to stash. 
+ 'archive_path': 'C:/ptr/', + 'alt_path': 'C:/alt/', + # 'temporary_local_alt_archive_to_hold_files_while_copying' : 'F:/tempfolderforaltpath', + + 'save_to_alt_path': 'no', + # THIS FOLDER HAS TO BE ON A LOCAL DRIVE, not a network drive due to the necessity of huge memmap files + 'local_calibration_path': 'C:/ptr/', + # Number of days to keep files in the local archive before deletion. Negative means never delete + 'archive_age': 2.0, + + + 'plog_path': 'C:/ptr/bggs1/', # place where night logs can be found. May not be used on aro1 wer 20250322 + + + #'redis_available': True, + #'redis_ip': "10.0.0.174:6379", + + # Scratch drive folder + 'scratch_drive_folder': 'D:/obstemp/', + + + # For low bandwidth sites, do not send up large files until the end of the night. set to 'no' to disable + 'send_files_at_end_of_night': 'no', + # For low diskspace sites (or just because they aren't needed), don't save a separate raw file to disk after conversion to fz. + 'save_raw_to_disk': False, + # PTR uses the reduced file for some calculations (focus, SEP, etc.). To save space, this file can be removed after usage or not saved. + 'keep_reduced_on_disk': False, + # To save space, the focus file can not be saved. + 'keep_focus_images_on_disk': False, + # A certain type of naming that sorts filenames by numberid first + 'save_reduced_file_numberid_first': True, + # Number of files to send up to the ptrarchive simultaneously. + 'number_of_simultaneous_ptrarchive_streams': 4, + # Number of files to send over to the pipearchive simultaneously. + 'number_of_simultaneous_pipearchive_streams': 4, + # Number of files to send over to the altarchive simultaneously. + 'number_of_simultaneous_altarchive_streams': 4, + + + + 'push_file_list_to_pipe_queue': False, + + # The site can fully platesolve each image before it is sent off to s3 or a PIPE + # If there are spare enough cycles at the site, this saves time for the PIPE + # to concentrate on more resource heavy reductions. 
+ # Also leads to fully platesolved reduced images on the local site computer + # Usually set this to True + # if the scope has a decent NUC.... CURRENTLY LEAVE AS IS UNTIL MTF HAS FINISHED TESTING THIS. + 'fully_platesolve_images_at_site_rather_than_pipe' : True, + + + # Bisque mounts can't run updates in a thread ... yet... until I figure it out, + # So this is False for Bisques and true for everyone else. + 'run_main_update_in_a_thread': True, + 'run_status_update_in_a_thread': True, + + # Minimum realistic seeing at the site. + # This allows culling of unphysical results in photometry and other things + # Particularly useful for focus + 'minimum_realistic_seeing': 1.5, + 'has_lightning_detector': False, + + # TIMING FOR CALENDAR EVENTS + # How many minutes with respect to eve sunset start flats + 'bias_dark interval': 120., # minutes + # Was 55 WER 20240313 Before Sunset Minutes neg means before, + after. + 'eve_sky_flat_sunset_offset': -30., + # How many minutes after civilDusk to do.... + 'end_eve_sky_flats_offset': 15., + 'clock_and_auto_focus_offset': 15, # min before start of observing + 'astro_dark_buffer': 35, # Min before and after AD to extend observing window + 'morn_flat_start_offset': -10., # min from Sunrise + 'morn_flat_end_offset': +40., # min from Sunrise + + + + + # Exposure times for standard system exposures + 'focus_exposure_time': 5, # Exposure time in seconds for exposure image + 'pointing_exposure_time': 12, # Exposure time in seconds for exposure image + + # How often to do various checks and such + 'observing_check_period': 3, # How many minutes between weather checks + 'enclosure_check_period': 3, # How many minutes between enclosure checks + + # Turn on and off various automated calibrations at different times. + 'auto_eve_bias_dark': False, + 'auto_eve_sky_flat': True, + # Units?? Just imposing a minimum in case of a restart. 
+ 'time_to_wait_after_roof_opens_to_take_flats': 3, + # WER 20240303 Afternoon, changed from True + 'auto_midnight_moonless_bias_dark': True, + 'auto_morn_sky_flat': True, + 'auto_morn_bias_dark': False, + + # FOCUS OPTIONS + # This is a time, in hours, over which to bypass automated focussing (e.g. at the start of a project it will not refocus if a new project starts X hours after the last focus) + 'periodic_focus_time': 2, + 'stdev_fwhm': 0.4, # This is the expected variation in FWHM at a given telescope/camera/site combination. This is used to check if a fwhm is within normal range or the focus has shifted + 'focus_trigger': 0.5, # What FWHM increase is needed to trigger an autofocus + + # PLATESOLVE options + 'solve_nth_image': 1, # Only solve every nth image + 'solve_timer': 0.05, # Only solve every X minutes NB WER 3 seconds???? + 'threshold_mount_update': 45, # only update mount zero point when X arcseconds away + # units? maximum radial drift allowed for a correction when running a block + 'limit_mount_tweak': 15, + + 'defaults': { + 'screen': 'screen', + 'mount': 'aropier1', + # 'telescope': 'Main OTA', #How do we handle selector here, if at all? + 'focuser': 'focuser', + # 'rotator': 'rotator', + 'selector': None, + 'filter_wheel': 'LCO FW50_001d', + 'camera': 'sq003ms', + 'sequencer': 'sequencer' + }, + # Initial roles are aassigned here. These may change during runtime. + # Value is the device display name + # This is where to configure a second device of the same type if you want to control it in the site code. + # Devices are referenced in obs with self.devices['device_role'] + # Also important to note: these must match the roles in obs.py create_devices(). + # Roles are standardized across all sites even if not all roles are used at each site. 
+ 'device_roles': { + 'mount': 'aropier1', + 'main_rotator': None, + 'main_focuser': 'focuser', + 'main_fw': 'LCO FW50_001d', + + # Cameras + 'main_cam': 'bggsfli01', + 'guide_cam': None, + 'widefield_cam': None, + 'allsky_cam': None, + }, + + # The LCO scheduler references a description of this site in configdb + # The logic in configdb is organized slightly differently than the PTR + # config files (like this one), but they should ultimately represent the + # same underlying hardware. + # When a PTR obsevatory is running an observation created by the scheduler, + # we'll use this to figure out what devices to use to run that observation. + # The key is the instrument name from configdb, and the value is a dict of + # device names from this config file for each type of device. + # + # This should only be modified if the configuration in configdb changes. + 'configdb_instrument_mapping': { + 'qhy600-2a': { + 'mount': 'aropier1', + 'camera': 'sq003ms', + 'filter_wheel': 'LCO FW50_001d', + 'rotator': None, + 'focuser': 'focuser' + }, + 'qhy600-2b': { + 'mount': 'aropier1', + 'camera': 'sq003ms', + 'filter_wheel': 'LCO FW50_001d', + 'rotator': None, + 'focuser': 'focuser' + } + }, + 'configdb_telescope': '0m3', + 'configdb_enclosure': 'roof', + + 'device_types': [ + 'mount', + # 'telescope', + # 'screen', + # 'rotator', + 'selector', + 'filter_wheel', + 'focuser', + 'camera', + 'sequencer', + ], + 'short_status_devices': [ + 'mount', + # 'telescope', + # 'screen', + 'rotator', + 'focuser', + 'selector', + 'filter_wheel', + 'camera', + 'sequencer', + ], + + + 'mount': { + 'aropier1': { + 'parent': 'enclosure1', + 'name': 'aropier1', + # Can be a name if local DNS recognizes it. + 'hostIP': '10.0.0.140', + 'hostname': 'safpier', + 'desc': 'AP 1600 GoTo', + 'driver': 'AstroPhysicsV2.Telescope', + # this is redundnat with a term below near model. + 'alignment': 'Equatorial', + # degrees floating, 0.0 means do not apply this constraint. 
+ 'default_zenith_avoid': 0.0, + # Some mounts report they have finished slewing but are still vibrating. This adds in some buffer time to a wait for slew. + 'wait_after_slew_time': 0.0, + 'needs_to_wait_for_dome' : False, + + # paddle refers to something supported by the Python code, not the AP paddle. + 'has_paddle': False, + # Presumably this is the AltAzDServer from Optec. + 'has_ascom_altaz': False, + # This can be changed to 'tel2'... by user. This establishes a default. + 'pointing_tel': 'tel1', + + 'home_after_unpark': False, + 'home_before_park': False, + + 'settle_time_after_unpark': 5, + 'settle_time_after_park': 5, + # + # if this is set to yes, it will reset the mount at startup and when coordinates are out significantly + 'permissive_mount_reset': 'no', + # How many seconds of inactivity until it will park the telescope + 'time_inactive_until_park': 900.0, + + # final: 0.0035776615398219747 -0.1450812805892454 + 'west_clutch_ra_correction': 0.0, + 'west_clutch_dec_correction': 0.0, + 'east_flip_ra_correction': 0.0, # Initially -0.039505313212952586, + 'east_flip_dec_correction': 0.0, # initially -0.39607711292257797, + 'settings': { + # Decimal degrees, North is Positive These *could* be slightly different than site. + 'latitude_offset': 0.0, + # Decimal degrees, West is negative #NB This could be an eval( <>)) + 'longitude_offset': 0.0, + 'elevation_offset': 0.0, # meters above sea level + 'home_altitude': 0.0, + 'home_azimuth': 0.0, + # Meant to be a circular horizon. Or set to None if below is filled in. + 'horizon': 25., + 'horizon_detail': { # Meant to be something to draw on the Skymap with a spline fit. + '0.0': 25., + '90': 25., + '180': 25., + '270': 25., + '359': 25. + }, # We use a dict because of fragmented azimuth measurements. + 'ICRS2000_input_coords': True, + # Refraction is applied during pointing. 
+ 'refraction_on': True, + 'model_on': True, + 'model_type': "Equatorial", + # Rates implied by model and refraction applied during tracking. + 'rates_on': True, + # In the northern hemisphere, positive MA means that the pole of the mounting + # is to the right of due north. + # In the northern hemisphere, positive ME means that the pole of the mounting is + # below the true (unrefracted) pole. A mounting aligned the refracted pole (for most + # telescopes probably the simplest and best thing to aim for in order to avoid unwanted + # field rotation effects will have negative ME. 'model_date': "n.a.", + # units for model are asec/radian + 'model_equat': { + # Home naturally points to West for AP GEM mounts. Howeveer when @ Park 5 it is flipped. + 'ih': 0.0, + # These two are zero-point references for HA/Ra and dec. + 'id': 0.0, + 'eho': 0.0, # East Hour angle Offset -- NOTE an offset + 'edo': 0.0, # East Dec Offset + 'ma': 0.0, # Azimuth error of polar axis + 'me': 0.0, # Elev error of polar axisDefault is about -60 asec above pole for ARO + 'ch': 0.0, # Optical axis not perp to dec axis + 'np': 0.0, # Non-perp of polar and dec axis + 'tf': 0.0, # Sin flexure -- Hook's law. + 'tx': 0.0, # Tangent flexure + 'hces': 0.0, # Sin centration error of RA encoder + 'hcec': 0.0, # Cos centration error of RA encoder + 'dces': 0.0, # Sin centration error of DEC encoder + 'dcec': 0.0, # Cos centration error of DEC encoder + } # 'model_version': 'N.A', # As in "20240526-1.mod" Eventually we can put the model name here and pick up automatically. + + + + + , + 'model_altAz': { + # "Home naturally points to West for AP GEM mounts. + 'ia': 000.00, + 'ie': 0.00, # These two are zero-point references. 
+ 'eho': 0.0, # "East Hour angle Offset -- NOTE an offset + 'edo': 0.0, # "East Dec Offset + 'ma': 0.0, + 'me': 0.0, # Default is about -60 asec above pole for ARO + 'ch': 0.0, + 'np': 0.0, + 'tf': 0.0, + 'tx': 0.0, + 'hces': 0.0, + 'hcec': 0.0, + 'dces': 0.0, + 'dcec': 0.0, + } + }, + }, + + }, + + + + 'telescope': { # OTA = Optical Tube Assembly. + 'Main OTA': { + 'parent': 'aropier1', + 'name': 'Main OTA', + 'telescop': 'aro1', + 'desc': 'Ceravolo 300mm F4.9/F9 convertable', + 'telescope_description': 'Ceravolo 0m30 F4.9/F9 Astrograph', + 'ptrtel': 'cvagr-0m30-f4.9-f4p9-001', + # Essentially this device is informational. It is mostly about the optics. + 'driver': None, + 'collecting_area': 31808, # This is correct as of 20230420 WER + # Informatinal, already included in collecting_area. + 'obscuration': 0.55, + 'aperture': 30, + # 1470, #2697, # Converted to F9, measured 20200905 11.1C 1468.4 @ F4.9? + 'focal_length': 1468.4, + 'has_dew_heater': False, + 'screen_name': 'screen', + 'focuser_name': 'focuser', + 'rotator_name': 'rotator', + # This is a default for a single instrument system + 'has_instrument_selector': False, + 'selector_positions': 1, # Note starts with 1 + 'instrument names': ['sq003ms'], + 'instrument aliases': ['QHY600Mono'], + 'configuration': { + # This needs expanding into something easy for the owner to change. + 'f-ratio': 'f4.9', + "position1": ["darkslide1", "LCO FW50_001d", "sq003ms"], + }, + 'camera_name': 'sq003ms', + 'filter_wheel_name': 'LCO FW50_001d', + 'has_fans': True, + 'has_cover': False, + # East is negative These will vary per telescope. + 'axis_offset_east': -19.5, #Inches appently! + 'axis_offset_south': -8, # South is negative + + 'settings': { + 'fans': ['Auto', 'High', 'Low', 'Off'], + # If the mount model is current, these numbers are usually near 0.0 + 'offset_collimation': 0.0, + # for tel1. Units are arcseconds. + 'offset_declination': 0.0, + 'offset_flexure': 0.0, + 'west_flip_ha_offset': 0.0, # new terms. 
+ 'west_flip_ca_offset': 0.0, + 'west_flip_dec_offset': 0.0 + }, + + + + }, + + }, + + 'rotator': { + 'rotator': { + 'parent': 'Main OTA', + 'name': 'rotator', + 'desc': 'Opetc Gemini', + 'driver': 'ASCOM.OptecGemini.Rotator', + 'com_port': 'COM10', + 'minimum': -180., + 'maximum': 360.0, + 'step_size': 0.0001, # Is this correct? + 'backlash': 0.0, + 'throw': 300, + 'unit': 'degree' # 'steps' + }, + + }, + + 'screen': { + 'screen': { + 'parent': 'telescope1', + 'name': 'screen', + 'desc': 'Optec Alnitak 16"', + 'driver': 'COM14', # This needs to be a 4 or 5 character string as in 'COM8' or 'COM22' + # This is the % of light emitted when Screen is on and nominally at 0% bright. + 'minimum': 5, + # Out of 0 - 255, this is the last value where the screen is linear with output. + 'saturate': 255, + # These values have a minor temperature sensitivity yet to quantify. + + + }, + + + }, + + 'focuser': { + 'focuser': { + 'parent': 'telescope1', + 'name': 'focuser', + 'desc': 'Optec Gemini', + 'driver': 'ASCOM.PWI3.Focuser', + 'com_port': 'COM13', # AP 'COM5' No Temp Probe on SRO AO Honders + 'start_at_config_reference': False, + 'correct_focus_for_temperature': True, + # highest value to consider as being in "good focus". Used to select last good focus value + 'maximum_good_focus_in_arcsecond': 5.0, + 'focuser_movement_settle_time': 3, + # F4.9 setup + 'reference': 37000.2, # 20241204 + 'ref_temp': 7.5, # Average for the fit ~ 27.5 degrees wide +20 to -75 + 'temp_coeff': -24.974, # R^2 = 0.769 + + 'relative_focuser': False, + + # F9 setup + # 'reference': unknown, + # 'temp_coeff': unknown, # Meas -12 c to 4C so nominal -4C + # microns per degree of tube temperature + 'z_compression': 0.0, # microns per degree of zenith distance + 'z_coef_date': '20240820', + # NB this area is confusing steps and microns, and needs fixing. 
+ 'minimum': 0, + 'maximum': 12600, # 12672 actually + 'step_size': 1, + 'backlash': 600, # non-zero means enabled, + means over-travel when moving out, then come back IN same amount. + 'throw': 90., #20240925 reduced from: #140, # Start with 10X focus tolerance. + 'focus_tolerance': 130, #Microns ??? used Golf Focus Caclulator + 'unit': 'micron', + 'unit_conversion': 1.0, + 'has_dial_indicator': False + + + }, + + }, + + 'selector': { + 'selector': { + 'parent': 'Main OTA', + 'name': 'selector', + 'desc': 'Null Changer', + 'driver': None, + 'com_port': None, + 'startup_script': None, + 'recover_script': None, + 'shutdown_script': None, + 'ports': 1, + # 'eShel_spect', 'planet_camera', 'UVEX_spect'], + 'instruments': ['Aux_camera'], + + 'cameras': ['sq003ms'], # 'camera_1_2', None, 'camera_1_4'], + + 'guiders': [None], # 'guider_1_2', None, 'guide_1_4'], + 'default': 0 + }, + + }, + + 'filter_wheel': { + "LCO FW50_001d": { + "parent": "Main OTA", + "name": "LCO FW50_001d", + 'service_date': '20210716', + + + # sec WER 20240303 continuing test. how long to wait for the filter to settle after a filter change(seconds) + "filter_settle_time": 1, + # This ignores the automatically estimated filter gains and starts with the values from the config file + 'override_automatic_filter_throughputs': False, + + "driver": "Maxim.CCDCamera", # 'ASCOM.FLI.FilterWheel', #'MAXIM', + 'ip_string': 'http://10.0.0.110', + "dual_wheel": True, + 'filter_reference': 'PL', + 'settings': { + # 'filter_count': 23, + # "filter_type": "50mm_sq.", + # "filter_manuf": "Astrodon", + # 'home_filter': 1, + 'default_filter': "PL", + 'focus_filter': 'PL', + # 'filter_reference': 1, # We choose to use PL as the default filter. Gains taken at F9, Ceravolo 300mm + # Columns for filter data are : ['filter', 'filter_index', 'filter_offset', 'sky_gain', 'screen_gain', 'alias'] + # NB NB Note to WER please add cwl, bw and 'shape'. 
Throughputs ADJUSTED 20240103 Eve run + + + # 'filter_data': [ + # ['Air', [0, 0], -800, 1200., [2 , 20], 'AIR'], #0 Gains est and some from 20240106 listing + # ['PL', [7, 0], 0, 1100., [360 , 170], 'Photo Luminance - does not pass NIR'], #1 + # ['Exo', [8, 0], 0, 915., [360 , 170], 'Exoplanet - yellow, no UV or far NIR'], #2 + # ['PB', [0, 6], 0, 700, [360 , 170], 'Photo Blue'], #3 + # ['gp', [2, 0], 0, 820., [.77 , 20], "Sloan g'"], #4 + # ['PR', [0, 8], 0, 520., [.32 , 20], 'Photo Blue'], #5 + # ['PG', [0, 7], 0, 470., [30 , 170], 'Photo Green'], #6 + # ['BB', [9, 0], 0, 500., [0.65, 20], 'Bessell B'], #7 + # ['BV', [10, 0], 0, 540., [.32 , 20], 'Bessell V'], #8 + # ['BR', [11, 0], 0, 600., [10 , 170], 'Bessell R'], #9 + # ['rp', [3, 0], 0, 560., [1.2 , 20], "Sloan r'"], #10 + # ['NIR', [0, 10], 0, 226., [0.65, 20], 'Near IR - redward of PL'], #11 Value suspect 2023/10/23 WER + # ['ip', [4, 0], 0, 250., [.65 , 20], "Sloan i'"], #12 + # ['zs', [5, 0], 0, 250., [.65 , 20], "Sloan Z-short'"], #12 + # ['BI', [12, 0], 0, 155., [360 , 170], 'Bessell I'], #13 + # ['up', [1, 0], 0, 39.0, [2 , 20], "Sloan u'"], #14 + # ['O3', [0, 2], 0, 36.0, [360 , 170], 'Oxygen III'], #15 #guess + # ['zp', [0, 9], 0, 11.0, [1.0 , 20], "Sloan z'-wide"], #16 # NB ZP is a broader filter than zs. + # ['CR', [0, 5], 0, 9.0, [360 , 170], 'Continuum Re' - for Star subtraction'], #17 + # ['HA', [0, 3], 0, 8.0, [360 , 170], 'Hydrogen Alpha - aka II'], #18 + # ['N2', [13, 0], 0, 4.5, [360 , 170], 'Nitrogen II'], #19 + # ['S2', [0, 4], 0, 4.5, [0.65, 20], 'Sulphur II'], #20 + + # ['Y', [6, 0], 0, 7.3, [360 , 170], "Rubin Y - low throughput, defective filter in top area "], #21 + + + # ['dark', [1, 3], 0, 0.00, [360 , 170], 'dk']], #22 #Not a real filter. 
Total 23 + #Front filter wheel is LCO Square 50 mm 10 positions + #Back (near camera wheel is LCO 50mm rount with 13 positions so + #the capacity is air + 23 filters.v) + 'filter_data': [ + ['Air', [0, 0], 'AIR'], # 0 + ['PL', [7, 0], 'Photo Luminance'], # 1 + ['Exo', [8, 0], 'Exoplanet'], # 2 + ['PB', [0, 6], 'Photo Blue'], # 3 + ['gp', [2, 0], "Sloan g"], # 4 + ['PR', [0, 8], 'Photo Red'], # 5 + ['PG', [0, 7], 'Photo Green'], # 6 + ['BB', [9, 0], 'Bessell B'], # 7 + ['BV', [10, 0], 'Bessell V'], # 8 + # ['BR', [11, 0], 'Bessell R'], #9 + ['rp', [3, 0], "Sloan r"], # 10 + # ['NIR', [0, 10], 'Near IR'], #11 Value suspect 2023/10/23 WER + ['ip', [4, 0], "Sloan i"], # 12 + # ['BI', [12, 0], 'Bessell I'], #13 + ['up', [1, 0], "Sloan u"], # 14 + ['O3', [0, 2], 'Oxygen III'], # 15 #guess + # 16 # NB ZP is a broader filter than zs. + ['zs', [0, 9], "Sloan z-short"], + # ['CR', [0, 5], 'Continuum Red - for Star subtraction'], #17 + ['HA', [0, 3], 'Hydrogen Alpha'], # 18 + ['N2', [13, 0], 'Nitrogen II'], # 19 + ['S2', [0, 4], 'Sulphur II'], # 20 + + # ['Y', [6, 0], "Rubin Y"], #21 + + + ['dk', [1, 3], 'dk']], # 22 #Not a real filter. Total 23 + + # 'filter_screen_sort': ['ip'], # don't use narrow yet, 8, 10, 9], useless to try. + # 'filter_sky_sort': ['S2','N2','HA','CR','zs','zp','up','O3','BI','NIR','ip','PR','BR',\ + # 'rp','PG','BV','BB','PB','gp','EXO','PL','air'], #Needs fixing once we get a good input series. 20240106 WER + + + + + }, + + + + + }, + }, + + 'lamp_box': { + 'lamp_box1': { + 'parent': 'None', # Parent is camera for the spectrograph + 'name': 'None', # "UVEX Calibration Unit", 'None' + 'desc': 'None', # 'eshel', # "uvex", 'None' + 'spectrograph': 'None', # 'echelle', 'uvex'; 'None' + 'driver': 'None', # ASCOM.Spox.Switch; 'None'; Note change to correct COM port used for the eShel calibration unit at mrc2 + 'switches': "None" # A string of switches/lamps the box has for the FITS header. 
# 'None'; "Off,Mirr,Tung,NeAr" for UVEX + }, + }, + + 'camera': { + 'bggsfli01': { + 'parent': 'telescope1', + # Important because this points to a server file structure by that name. + 'name': 'bggsfli01', + 'desc': 'QHY 600Pro', + 'overscan_trim': 'QHY600', + 'service_date': '20240604', + # 'driver': "ASCOM.QHYCCD.Camera", #"Maxim.CCDCamera", # "ASCOM.QHYCCD.Camera", ## 'ASCOM.FLI.Kepler.Camera', + # NB Be careful this is not QHY Camera2 or Guider "Maxim.CCDCamera", #'ASCOM.FLI.Kepler.Camera', "ASCOM.QHYCCD.Camera", # + 'driver': "Maxim.CCDCamera", + + 'detector': 'Sony IMX455', + 'manufacturer': 'QHY', + 'use_file_mode': False, + 'file_mode_path': 'G:/000ptr_saf/archive/sq003ms/autosaves/', + + + 'settings': { + + # These are the offsets in degrees of the actual telescope from the latitude and longitude of the WEMA settings + 'north_offset': 0.0, # These three are normally 0.0 for the primary telescope + 'east_offset': 0.0, + # If there is sufficient memory ... OR .... not many flats, it is faster to keep the flats in memory. + # If there is sufficient memory ... OR .... not many flats, it is faster to keep the flats in memory. + 'hold_flats_in_memory': True, + + # Simple Camera Properties + 'is_cmos': True, + 'is_osc': False, + 'is_color': False, # NB we also have a is_osc key. + 'osc_bayer': 'RGGB', + + # There are some infuriating popups on theskyx that manually + # need to be dealt with when doing darks and lights. + # This setting uses a workaround for that. This is just for CMOS + # CCDs are fine. + 'cmos_on_theskyx': False, + + # For direct QHY usage we need to set the appropriate gain. + # This changes from site to site. "Fast" scopes like the RASA need lower gain then "slow". + # Sky quality is also important, the worse the sky quality, the higher tha gain needs to be + # Default for QHY600 is GAIN: 26, OFFSET: 60, readout mode 3. 
+ # Random tips from the internet: + # After the exposure, the background in the image should not be above 10% saturation of 16Bit while the brightest bits of the image should not be overexposed + # The offset should be set so that there is at least 300ADU for the background + # I guess try this out on the standard smartstack exposure time. + # https://www.baader-planetarium.com/en/blog/gain-and-offset-darks-flats-and-bias-at-cooled-cmos-cameras/ + # + # Also the "Readout Mode" is really important also + # Readout Mode #0 (Photographic DSO Mode) + # Readout Mode #1 (High Gain Mode) + # Readout Mode #2 (Extended Fullwell Mode) + # Readout Mode #3 (Extended Fullwell Mode-2CMS) + # + # With the powers invested in me, I have decided that readout mode 3 is the best. We can only pick one standard one + # and 0 is also debatably better for colour images, but 3 is way better for dynamic range.... + # We can't swip and swap because the biases and darks and flats will change, so we are sticking with 3 until + # something bad happens with 3 for some reason + # + + + # THIS IS THE PRE-TESTING SETTINGS FOR NEW MODE 11 Mar 2024 + # # In that sense, QHY600 NEEDS to be set at GAIN 26 and the only thing to adjust is the offset..... + # # USB Speed is a tradeoff between speed and banding, min 0, max 60. 60 is least banding. Most of the + # # readout seems to be dominated by the slow driver (difference is a small fraction of a second), so I've left it at 60 - least banding. + # 'direct_qhy_readout_mode' : 3, + # 'direct_qhy_gain' : 26, + # 'direct_qhy_offset' : 60, + # #'direct_qhy_usb_speed' : 50, + # 'direct_qhy_usb_traffic' : 45, #Early 20240103 = 50, not clear earlier but better than before. + # #The pattern before came and went. Now consitent at 50. Changing to 45. + # #Which one of these is actually used? 
+ # 'set_qhy_usb_speed': True, + # 'direct_qhy_usb_speed' : 45, #20240106 Afternoon WER Was 60 + + + + # # In that sense, QHY600 NEEDS to be set at GAIN 26 and the only thing to adjust is the offset..... + # # USB Speed is a tradeoff between speed and banding, min 0, max 60. 60 is least banding. Most of the + # # readout seems to be dominated by the slow driver (difference is a small fraction of a second), so I've left it at 60 - least banding. + # 'direct_qhy_readout_mode' : 0, + # 'direct_qhy_gain' : 26, + # 'direct_qhy_offset' : 60, + # #'direct_qhy_usb_speed' : 50, + # 'direct_qhy_usb_traffic' : 45, #Early 20240103 = 50, not clear earlier but better than before. + # #The pattern before came and went. Now consitent at 50. Changing to 45. + # #Which one of these is actually used? + # 'set_qhy_usb_speed': True, + + # #"speed isn't used I think - MTF, it is actually USB Traffic + # #'direct_qhy_usb_speed' : 45, #20240106 Afternoon WER Was 60 + + + # HERE IS THE POTENTIAL MODE 1 SETTING: Verified 20250220 WER + 'direct_qhy_readout_mode': 1, #High Gain mode + 'direct_qhy_gain': 60, #Above the QHY low noise Knee for High Gain mode. + 'direct_qhy_offset': 30, #means a basic bias level of around 70 ADU's + # 'direct_qhy_usb_speed' : 50, + 'direct_qhy_usb_traffic': 60, + # The pattern before came and went. Now consitent at 50. Changing to 45. + # Which one of these is actually used? + 'set_qhy_usb_speed': True, + + + # These options set whether an OSC gets binned or interpolated for different functions + # If the pixel scale is well-sampled (e.g. 0.6 arcsec per RGGB pixel or 0.3 arcsec per individual debayer pixel) + # Then binning is probably fine for all three. For understampled pixel scales - which are likely with OSCs + # then binning for focus is recommended. SEP and Platesolve can generally always be binned. + # 'interpolate_for_focus': False, + # # This setting will bin the image for focussing rather than interpolating. Good for 1x1 pixel sizes < 0.6. 
+ # 'bin_for_focus': False, + # 'focus_bin_value' : 1, + # 'interpolate_for_sep': False, + # 'bin_for_sep': False, # This setting will bin the image for SEP photometry rather than interpolating. + # 'sep_bin_value' : 1, + # This setting will bin the image for platesolving rather than interpolating. + # 'bin_for_platesolve': False, + # 'platesolve_bin_value' : 1, + + # Colour image tweaks. + 'osc_brightness_enhance': 1.0, + 'osc_contrast_enhance': 1.2, + 'osc_saturation_enhance': 1.5, + 'osc_colour_enhance': 1.2, + 'osc_sharpness_enhance': 1.2, + 'osc_background_cut': 15.0, + + + # ONLY TRANSFORM THE FITS IF YOU HAVE + # A DATA-BASED REASON TO DO SO..... + # USUALLY TO GET A BAYER GRID ORIENTATED CORRECTLY + # ***** ONLY ONE OF THESE SHOULD BE ON! ********* + 'transpose_fits': False, + 'flipx_fits': False, + 'flipy_fits': False, + 'rotate180_fits':True, # This also should be flipxy! + 'rotate90_fits': False, + 'rotate270_fits': False, + 'squash_on_x_axis': False, + + # What number of pixels to crop around the edges of a REDUCED image + # This is primarily to get rid of overscan areas and also all images + # Do tend to be a bit dodgy around the edges, so perhaps a standard + # value of 30 is good. Increase this if your camera has particularly bad + # edges. + 'reduced_image_edge_crop': 30, + + # HERE YOU CAN FLIP THE IMAGE TO YOUR HEARTS DESIRE + # HOPEFULLY YOUR HEARTS DESIRE IS SIMILAR TO THE + # RECOMMENDED DEFAULT DESIRE OF PTR + 'transpose_jpeg': False, + 'flipx_jpeg': False, + 'flipy_jpeg': False, + 'rotate90_jpeg': False, + 'rotate180_jpeg':False, + 'rotate270_jpeg': False, + + # This is purely to crop the preview jpeg for the UI + 'crop_preview': False, + 'crop_preview_ybottom': 2, # 2 needed if Bayer array + 'crop_preview_ytop': 2, + 'crop_preview_xleft': 2, + 'crop_preview_xright': 2, + + # # For large fields of view, crop the images down to solve faster. 
+ # # Realistically the "focus fields" have a size of 0.2 degrees, so anything larger than 0.5 degrees is unnecesary + # # Probably also similar for platesolving. + # # for either pointing or platesolving even on more modest size fields of view. + # # These were originally inspired by the RASA+QHY which is 3.3 degrees on a side and regularly detects + # # tens of thousands of sources, but any crop will speed things up. Don't use SEP crop unless + # # you clearly need to. + # 'focus_image_crop_width': 0.0, # For excessive fields of view, to speed things up crop the image to a fraction of the full width + # 'focus_image_crop_height': 0.0, # For excessive fields of view, to speed things up crop the image to a fraction of the full height + # 'focus_jpeg_size': 1500, # How many pixels square to crop the focus image for the UI Jpeg + + # # PLATESOLVE CROPS HAVE TO BE EQUAL! OTHERWISE THE PLATE CENTRE IS NOT THE POINTING CENTRE + # 'platesolve_image_crop': 0.0, # Platesolve crops have to be symmetrical + + # # Really, the SEP image should not be cropped unless your field of view and number of sources + # # Are taking chunks out of the processing time. + # # For excessive fields of view, to speed things up crop the processed image area to a fraction of the full width + # 'sep_image_crop_width': 0.0, + # # For excessive fields of view, to speed things up crop the processed image area to a fraction of the full width + # 'sep_image_crop_height': 0.0, + + # This is the area for cooling related settings + 'cooler_on': True, + 'temp_setpoint': -17, # 20240914 up from 3C, new camera installed 20240604 + 'temp_setpoint_tolerance': 2, + 'has_chiller': True, + # "temp_setpoint_tolarance": 1.5, + 'chiller_com_port': 'COM1', + 'chiller_ref_temp': 25, # C 20240906 + + + + + # This is the yearly range of temperatures. + # Based on New Mexico and Melbourne's variation... sorta similar. + # There is a cold bit and a hot bit and an inbetween bit. 
+ # from the 15th of the month to the 15 of the month
+ #
+ # ( setpoint, day_warm_difference, day_warm true or false)
+ 'set_temp_setpoint_by_season' : True,
+ 'temp_setpoint_nov_to_feb' : ( -10, 8, True),
+ 'temp_setpoint_feb_to_may' : ( -15, 10, True),
+ 'temp_setpoint_may_to_aug' : ( -20, 12, True),
+ 'temp_setpoint_aug_to_nov' : ( -15, 10, True),
+ #Presumably this is setpoint by season if it is False:
+ 'day_warm': False, # This is converted to a 0 or 1 depending on the Boolean value
+ 'day_warm_degrees': 4, # Assuming the Chiller is working.
+ 'protect_camera_from_overheating': False,
+
+ # These are the physical values for the camera
+ # related to pixelscale. Binning only applies to single
+ # images. Stacks will always be drizzled to the drizzle value from 1x1.
+ # 'onebyone_pix_scale': 0.528, # This is the 1x1 binning pixelscale
+ #'onebyone_pix_scale': 0.5283, # This is the 1x1 binning pixelscale
+ # Needs to be simple, it will recalculate things on the 1x1 binning pixscale above.
+ 'native_bin': 1,
+ 'x_pixel': 3.76, # pixel size in microns
+ 'y_pixel': 3.76, # pixel size in microns
+ # 'field_x': 1.3992, #4770*2*0.528/3600
+ # 'field_y': 0.9331, #3181*2*0.528/3600
+ # 'field_sq_deg': 1.3056,
+ # The drizzle_value is by the new pixelscale
+ # for the new resolution when stacking in the EVA pipeline
+ # Realistically you want a resolution of about 0.5 arcseconds per pixel
+ # Unless you are at a very poor quality site.
+ # If you have a higher resolution pixelscale it will use that instead.
+ # Generally leave this at 0.5 - the optimal value for ground based
+ # observatories.... unless you have a large field of view.
+ 'drizzle_value_for_later_stacking': 0.74,
+ 'dither_enabled': True, # Set this way for tracking testing
+
+
+ # This is the absolute minimum and maximum exposure for the camera
+ 'min_exposure': 0.01,
+ 'max_exposure': 180.,
+ # For certain shutters, short exposures aren't good for flats. Some CMOS have banding in too short an exposure.
Largely applies to ccds though. + 'min_flat_exposure': 2.0, + # Realistically there is maximum flat_exposure that makes sure flats are efficient and aren't collecting actual stars. + 'max_flat_exposure': 40.0, + # During the daytime with the daytime safety mode on, exposures will be limited to this maximum exposure + 'max_daytime_exposure': 0.2, + + + # One of the best cloud detections is to estimate the gain of the camera from the image + # If the variation, and hence gain, is too high according to gain + stdev, the flat can be easily rejected. + # Should be off for new observatories coming online until a real gain is known. + 'reject_new_flat_by_known_gain': True, + # These values are just the STARTING values. Once the software has been + # through a few nights of calibration images, it should automatically calculate these gains. + # 'camera_gain': 2.15, #[10., 10., 10., 10.], # One val for each binning. + # 'camera_gain_stdev': 0.16, #[10., 10., 10., 10.], # One val for each binning. + # 'read_noise': 9.55, #[9, 9, 9, 9], # All SWAGs right now + # 'read_noise_stdev': 0.004, #[10., 10., 10., 10.], # One val for each binning. + 'dark_lim_adu': 3.0, # adu/s of dark 20231229 moved down from 0.5 + 'dark_lim_std': 15, # first guess. See above. + # Saturate is the important one. Others are informational only. + 'fullwell_capacity': 65000, # NB Guess + 'saturate': 62500, + 'max_linearity': 61000, # Guess + # How long does it take to readout an image after exposure + 'cycle_time': 2.0, + # What is the base smartstack exposure time? + # It will vary from scope to scope and computer to computer. + # 30s is a good default. + 'smart_stack_exposure_time': 30, + 'smart_stack_exposure_NB_multiplier': 3, # Michael's setting + 'substack': False, + + # As simple as it states, how many calibration frames to collect and how many to store. 
+ 'number_of_bias_to_collect': 31, + 'number_of_dark_to_collect': 13, + 'number_of_flat_to_collect': 7, # increased from 5 20231226 WER + 'number_of_bias_to_store': 33, + 'number_of_dark_to_store': 27, + 'number_of_flat_to_store': 21, + # Default dark exposure time. + 'dark_exposure': 180, + + # In the EVA Pipeline, whether to run cosmic ray detection on individual images + 'do_cosmics': True, + # Simialrly for Salt and Pepper + 'do_saltandpepper' : True, + # And debanding + 'do_debanding' : False, + + # Does this camera have a darkslide, if so, what are the settings? + 'has_darkslide': False, + 'darkslide_type': 'bistable', + 'darkslide_can_report': False, + 'darkslide_com': 'COM17', + 'shutter_type': "Electronic", + + + + # 'has_screen': False, + # 'screen_settings': { + # 'screen_saturation': 157.0, # This reflects WMD setting and needs proper values. + # 'screen_x4': -4E-12, # 'y = -4E-12x4 + 3E-08x3 - 9E-05x2 + 0.1285x + 8.683 20190731' + # 'screen_x3': 3E-08, + # 'screen_x2': -9E-05, + # 'screen_x1': .1258, + # 'screen_x0': 8.683 + # }, + }, + }, + + + }, + + 'sequencer': { + 'sequencer': { + 'parent': 'site', + 'name': 'sequencer', + 'desc': 'Automation Control', + 'driver': None, + }, + }, + + # I am not sure AWS needs this, but my configuration code might make use of it. + # This area should be re-purposed to introduce the pipeline and or an additional local mega-NAS. 
+ 'server': { + 'server1': { + 'name': None, + 'win_url': None, + 'redis': '(host=none, port=6379, db=0, decode_responses=True)' + }, + }, +} + +if __name__ == '__main__': + j_dump = json.dumps(site_config) + site_unjasoned = json.loads(j_dump) + if str(site_config) == str(site_unjasoned): + print('Strings matched.') + if site_config == site_unjasoned: + print('Dictionaries matched.') diff --git a/devices/camera.py b/devices/camera.py index 625b4e9c5..73b21557c 100644 --- a/devices/camera.py +++ b/devices/camera.py @@ -1218,7 +1218,7 @@ def __init__(self, driver: str, name: str, site_config: dict, observatory: 'Obse # plog("Control is via Maxim camera interface, not ASCOM.") # plog("Please note telescope is NOT connected to Maxim.") - elif driver == 'maxim': + elif driver == 'maxim' or driver =="Maxim.CCDCamera": # NB NB NB Considerputting this up higher. plog("Maxim camera is initializing.") self._connected = self._maxim_connected @@ -1239,7 +1239,7 @@ def __init__(self, driver: str, name: str, site_config: dict, observatory: 'Obse self.ascom = False self.theskyx = False self.qhydirect = False - plog("Maxim is connected: ", self._connect(True)) + #plog("Maxim is connected: ", self._connect(True)) self.app = win32com.client.Dispatch("Maxim.Application") plog(self.camera) self.camera.SetFullFrame() @@ -1346,7 +1346,7 @@ def __init__(self, driver: str, name: str, site_config: dict, observatory: 'Obse pwm = None if self.settings["cooler_on"]: # NB NB why this logic, do we mean if not cooler found on, then turn it on and take the delay? 
self._set_cooler_on() - if self.theskyx: + if self.theskyx or self.ascom or self.maxim: temp, humid, pressure, pwm = self.camera.Temperature, 999.9, 999.9, 0.0 else: temp, humid, pressure , pwm = self._temperature() @@ -1356,10 +1356,10 @@ def __init__(self, driver: str, name: str, site_config: dict, observatory: 'Obse else: plog("Camera humidity and pressure is not reported.") - if self.maxim == True: - plog("TEC % load: ", self._maxim_cooler_power()) - else: - pass + # if self.maxim == True: + # plog("TEC % load: ", self._maxim_cooler_power()) + # else: + # pass #plog("TEC% load is not reported.") if pwm is not None: plog("TEC % load: ", pwm) @@ -1648,6 +1648,40 @@ def __init__(self, driver: str, name: str, site_config: dict, observatory: 'Obse target=self.camera_update_thread) self.camera_update_thread.daemon = True self.camera_update_thread.start() + + + if self.maxim: + self.maxim_connect_to_camera = False + self.maxim_request_start_exposure = False + self.maxim_retrieve_last_image = False + self.maxim_set_cooler_on = True + self.maxim_cooleron = True + self.maxim_set_setpoint_trigger = True + self.maxim_set_setpoint_value = self.setpoint + self.maxim_abort_exposure_trigger=False + self.maxim_temperature = self.camera.Temperature, 999.9, 999.9, 0 + self.camera_update_period = 5 + self.camera_update_timer = time.time() - 2 * self.camera_update_period + self.camera_updates = 0 + self.camera_update_thread = threading.Thread( + target=self.camera_update_thread) + self.camera_update_thread.daemon = True + self.camera_update_thread.start() + + if self.ascom : + self.ascom_set_cooler_on = True + self.ascom_cooleron = True + self.ascom_set_setpoint_trigger = True + self.ascom_set_setpoint_value = self.setpoint + self.ascom_temperature = self.camera.Temperature, 999.9, 999.9, 0 + self.camera_update_period = 5 + self.camera_update_timer = time.time() - 2 * self.camera_update_period + self.camera_updates = 0 + self.camera_update_thread = threading.Thread( + 
target=self.camera_update_thread) + self.camera_update_thread.daemon = True + self.camera_update_thread.start() + def __repr__(self): return f"" @@ -1975,7 +2009,10 @@ def camera_update_thread(self): self.camera_update_wincom = win32com.client.Dispatch(self.driver) - self.camera_update_wincom.Connect() + if self.theskyx: + self.camera_update_wincom.Connect() + elif self.maxim: + self.camera_update_wincom.LinkEnabled=True # This stopping mechanism allows for threads to close cleanly. while True: @@ -1983,39 +2020,144 @@ def camera_update_thread(self): # update every so often, but update rapidly if slewing. if (self.camera_update_timer < time.time() - self.camera_update_period) and not self.updates_paused: - if self.camera_update_reboot: - win32com.client.pythoncom.CoInitialize() - self.camera_update_wincom = win32com.client.Dispatch( - self.driver) - - self.camera_update_wincom.Connect() - - self.updates_paused = False - self.camera_update_reboot = False - - try: - self.theskyx_temperature = self.camera_update_wincom.Temperature, 999.9, 999.9, 0 - - self.theskyx_cooleron = self.camera_update_wincom.RegulateTemperature - - if self.theskyx_set_cooler_on == True: - - self.camera_update_wincom.RegulateTemperature = 1 - self.theskyx_set_cooler_on = False - - if self.theskyx_set_setpoint_trigger == True: - self.camera_update_wincom.TemperatureSetpoint = float( - self.theskyx_set_setpoint_value) - self.camera_update_wincom.RegulateTemperature = 1 - self.current_setpoint = self.theskyx_set_setpoint_value - self.theskyx_set_setpoint_trigger = False + + if self.theskyx: - if self.theskyx_abort_exposure_trigger == True: - self.camera_update_wincom.Abort() - self.theskyx_abort_exposure_trigger = False - except: - plog("non-permanent glitch out in the camera thread.") - plog(traceback.format_exc()) + if self.camera_update_reboot: + win32com.client.pythoncom.CoInitialize() + self.camera_update_wincom = win32com.client.Dispatch( + self.driver) + + 
self.camera_update_wincom.Connect() + + self.updates_paused = False + self.camera_update_reboot = False + + try: + self.theskyx_temperature = self.camera_update_wincom.Temperature, 999.9, 999.9, 0 + + self.theskyx_cooleron = self.camera_update_wincom.RegulateTemperature + + if self.theskyx_set_cooler_on == True: + + self.camera_update_wincom.RegulateTemperature = 1 + self.theskyx_set_cooler_on = False + + if self.theskyx_set_setpoint_trigger == True: + self.camera_update_wincom.TemperatureSetpoint = float( + self.theskyx_set_setpoint_value) + self.camera_update_wincom.RegulateTemperature = 1 + self.current_setpoint = self.theskyx_set_setpoint_value + self.theskyx_set_setpoint_trigger = False + + if self.theskyx_abort_exposure_trigger == True: + self.camera_update_wincom.Abort() + self.theskyx_abort_exposure_trigger = False + except: + plog("non-permanent glitch out in the camera thread.") + plog(traceback.format_exc()) + #breakpoint() + + elif self.maxim: # basically ascom + if self.camera_update_reboot: + win32com.client.pythoncom.CoInitialize() + self.camera_update_wincom = win32com.client.Dispatch( + self.driver) + + self.camera_update_wincom.LinkEnabled=True + + self.updates_paused = False + self.camera_update_reboot = False + + try: + self.maxim_temperature = self.camera_update_wincom.Temperature, 999.9, 999.9, 0 + + self.maxim_cooleron = self.camera_update_wincom.CoolerOn + + if self.maxim_set_cooler_on == True: + + self.camera_update_wincom.CoolerOn = True + self.maxim_set_cooler_on = False + + if self.maxim_set_setpoint_trigger == True: + self.camera_update_wincom.TemperatureSetpoint = float( + self.maxim_set_setpoint_value) + self.camera_update_wincom.CoolerOn = True + self.current_setpoint = self.maxim_set_setpoint_value + self.maxim_set_setpoint_trigger = False + + if self.maxim_abort_exposure_trigger == True: + self.camera_update_wincom.AbortExposure + self.maxim_abort_exposure_trigger = False + + #################################### + + 
self.maxim_is_connected=self.camera_update_wincom.LinkEnabled + + if self.maxim_connect_to_camera: + self.camera_update_wincom.LinkEnabled=True + self.maxim_connect_to_camera=False + + self.maxim_cooler_power=self.camera_update_wincom.CoolerPower + self.maxim_heatsinktemperature=self.camera_update_wincom.Temperature + + if self.maxim_request_start_exposure: + self.camera_update_wincom.Expose(self.maxim_requested_exposure_time, self.maxim_requested_lightframe) + self.maxim_request_start_exposure=False + + self.maxim_image_is_available=self.camera_update_wincom.ImageReady + + if self.maxim_retrieve_last_image: + timeouttimer=time.time() + while not self.maxim_image_is_available and time.time() -timeouttimer <30: + time.sleep(0.1) + self.maxim_image_is_available=self.camera_update_wincom.ImageReady + self.maxim_last_image_array= self.camera_update_wincom.ImageArray + self.maxim_retrieve_last_image=False + + + + + except: + plog("non-permanent glitch out in the camera thread.") + plog(traceback.format_exc()) + breakpoint() + + + else: # basically ascom + if self.camera_update_reboot: + win32com.client.pythoncom.CoInitialize() + self.camera_update_wincom = win32com.client.Dispatch( + self.driver) + + self.camera_update_wincom.Connect() + + self.updates_paused = False + self.camera_update_reboot = False + + try: + self.ascom_temperature = self.camera_update_wincom.Temperature, 999.9, 999.9, 0 + + self.ascom_cooleron = self.camera_update_wincom.RegulateTemperature + + if self.ascom_set_cooler_on == True: + + self.camera_update_wincom.RegulateTemperature = 1 + self.ascom_set_cooler_on = False + + if self.ascom_set_setpoint_trigger == True: + self.camera_update_wincom.TemperatureSetpoint = float( + self.ascom_set_setpoint_value) + self.camera_update_wincom.RegulateTemperature = 1 + self.current_setpoint = self.ascom_set_setpoint_value + self.ascom_set_setpoint_trigger = False + + if self.ascom_abort_exposure_trigger == True: + self.camera_update_wincom.Abort() + 
self.ascom_abort_exposure_trigger = False + except: + plog("non-permanent glitch out in the camera thread.") + plog(traceback.format_exc()) time.sleep(max(1, self.camera_update_period)) else: @@ -2662,57 +2804,71 @@ def _zwo_getImageArray(self): def _maxim_connected(self): - return self.camera.LinkEnabled + #return self.camera.LinkEnabled + return self.maxim_is_connected def _maxim_connect(self, p_connect): - self.camera.LinkEnabled = p_connect - return self.camera.LinkEnabled + # self.camera.LinkEnabled = p_connect + # return self.camera.LinkEnabled + self.maxim_connect_to_camera=True + while not self.maxim_is_connected: + time.sleep(0.1) def _maxim_temperature(self): - return self.camera.Temperature, 999.9, 999.9,0 + return self.maxim_temperature#, 999.9, 999.9,0 def _maxim_cooler_power(self): - return self.camera.CoolerPower + return self.maxim_cooler_power def _maxim_heatsink_temp(self): - return self.camera.HeatSinkTemperature + return self.maxim_heatsinktemperature def _maxim_cooler_on(self): - return ( - self.camera.CoolerOn - ) + + return self.maxim_cooleron def _maxim_set_cooler_on(self): - self.camera.CoolerOn = True - return ( - self.camera.CoolerOn - ) + self.maxim_set_cooler_on=True + return True def _maxim_set_setpoint(self, p_temp): - self.camera.TemperatureSetpoint = float(p_temp) - self.current_setpoint = p_temp - return self.camera.TemperatureSetpoint + + self.maxim_set_setpoint_trigger=True + self.maxim_set_setpoint_value = float(p_temp) + self.current_setpoint = float(p_temp) + return float(p_temp) + + + # self.camera.TemperatureSetpoint = float(p_temp) + # self.current_setpoint = p_temp + # return self.camera.TemperatureSetpoint def _maxim_setpoint(self): - return self.camera.TemperatureSetpoint + return self.current_setpoint def _maxim_expose(self, exposure_time, bias_dark_or_light_type_frame): if bias_dark_or_light_type_frame == 'bias' or bias_dark_or_light_type_frame == 'dark': - imtypeb = 0 + imtypeb = False else: - imtypeb = 1 - 
self.camera.Expose(exposure_time, imtypeb) + imtypeb = True + + self.maxim_requested_exposure_time=exposure_time + self.maxim_requested_lightframe=imtypeb + self.maxim_request_start_exposure=True def _maxim_stop_expose(self): - self.camera.AbortExposure() + self.maxim_abort_exposure_trigger=True self.expresult = {} self.expresult["stopped"] = True def _maxim_imageavailable(self): - return self.camera.ImageReady + return self.maxim_image_is_available def _maxim_getImageArray(self): - return np.asarray(self.camera.ImageArray) + self.maxim_retrieve_last_image=True + while self.maxim_retrieve_last_image: + time.sleep(0.05) + return np.asarray(self.maxim_last_image_array) def _ascom_connected(self): return self.camera.Connected @@ -5279,6 +5435,11 @@ def finish_exposure( #breakpoint() except Exception as e: + + if self.maxim: + plog(e) + plog(traceback.format_exc()) + breakpoint() if self.theskyx: if 'No such file or directory' in str(e): diff --git a/devices/sequencer.py b/devices/sequencer.py index 20d4125ff..e04fc2764 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -1080,10 +1080,12 @@ def manager(self): # Collect frames for frame in frames_to_collect: - exposure_time, image_type, count_multiplier = frame[:3] - check_exposure = frame[3] if len(frame) > 3 else False - if not self.collect_midnight_frame(exposure_time, image_type, count_multiplier, stride, min_exposure, check_exposure): - break + print (frame[0]) + if frame[0] >= min_exposure: + exposure_time, image_type, count_multiplier = frame[:3] + check_exposure = frame[3] if len(frame) > 3 else False + if not self.collect_midnight_frame(exposure_time, image_type, count_multiplier, stride, min_exposure, check_exposure): + break if g_dev['cam'].temp_setpoint_by_season: # Here change the setpoint back to tonight's setpoint @@ -2175,7 +2177,7 @@ def bias_dark_script(self, req=None, opt=None, morn=False, ending=None): broadband_ss_biasdark_exp_time = g_dev['cam'].settings['smart_stack_exposure_time'] 
narrowband_ss_biasdark_exp_time = broadband_ss_biasdark_exp_time * g_dev['cam'].settings['smart_stack_exposure_NB_multiplier'] # There is no point getting biasdark exposures below the min_flat_exposure time aside from the scaled dark values. - # min_exposure = min(float(g_dev['cam'].settings['min_flat_exposure']),float(g_dev['cam'].settings['min_exposure'])) + min_exposure = min(float(g_dev['cam'].settings['min_flat_exposure']),float(g_dev['cam'].settings['min_exposure'])) @@ -2368,9 +2370,9 @@ def bias_dark_script(self, req=None, opt=None, morn=False, ending=None): # Iterate over exposure settings for exposure_time, image_type, count_multiplier in exposures: - #if exposure_time >= min_exposure: - if not self.collect_dark_frame(exposure_time, image_type, count_multiplier, stride, min_to_do, dark_exp_time, cycle_time, ending): - break + if exposure_time >= min_exposure: + if not self.collect_dark_frame(exposure_time, image_type, count_multiplier, stride, min_to_do, dark_exp_time, cycle_time, ending): + break # Collect additional frames if not self.collect_bias_frame(stride, stride, min_to_do, dark_exp_time, cycle_time, ending): From b85cc8243f3a960b550e6fb0cc66c722e3a70475 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Thu, 3 Jul 2025 07:35:02 +0000 Subject: [PATCH 27/30] scheduler updates --- configs/eco1/obs_config.py | 7 +++++++ devices/camera.py | 3 ++- devices/schedule_manager.py | 33 +++++++++++++++++++++++---------- devices/sequencer.py | 5 +++++ obs.py | 16 ++++++++++------ 5 files changed, 47 insertions(+), 17 deletions(-) diff --git a/configs/eco1/obs_config.py b/configs/eco1/obs_config.py index f436d22b4..712913eee 100644 --- a/configs/eco1/obs_config.py +++ b/configs/eco1/obs_config.py @@ -33,11 +33,18 @@ # 'jobs_http_base' : 'https://jobs.photonranch.org/jobs/', # 'logs_http_base' : 'https://logs.photonranch.org/logs/', # 'status_http_base' : 'https://status.photonranch.org/status/', + + #'calendar_update_url': 
'https://calendar.photonranch.org/calendar/siteevents', + + #'url_proj': "https://projects.photonranch.org/projects/get-project", + 'api_http_base' : 'https://hub.nextastro.org/', 'jobs_http_base' : 'https://hub.nextastro.org/jobs', 'logs_http_base' : 'https://hub.nextastro.org/logs/', 'status_http_base' : 'https://hub.nextastro.org/status/', + 'calendar_update_url' : 'https://hub.nextastro.org/calendar/siteevents', + 'url_proj': "https://hub.nextastro.org/get-project", # Name, local and owner stuff 'name': 'Eltham College Observatory, 0m4f6.8', diff --git a/devices/camera.py b/devices/camera.py index 80de13320..80bb0f8a8 100644 --- a/devices/camera.py +++ b/devices/camera.py @@ -4694,7 +4694,8 @@ def clean_object_name(name): g_dev['obs'].images_since_last_solve > self.site_config["solve_nth_image"] or # Too much time passed since last solve - (datetime.now(tz=timezone.utc) - g_dev['obs'].last_solve_time) > + #(datetime.now(tz=timezone.utc) - g_dev['obs'].last_solve_time) > + (datetime.utcnow() - g_dev['obs'].last_solve_time) > timedelta(minutes=self.site_config["solve_timer"]) )) ) diff --git a/devices/schedule_manager.py b/devices/schedule_manager.py index 04b6fed8e..83233063e 100644 --- a/devices/schedule_manager.py +++ b/devices/schedule_manager.py @@ -15,11 +15,13 @@ def __init__(self, schedule_start: int, schedule_end: int, site_proxy, # SiteProxy object, passed from the observatory class - ptr_update_interval: int=60, # Default to 60 seconds + ptr_update_interval: int=15, # Default to 60 seconds lco_update_interval: int=30, # Default to 30 seconds include_lco_scheduler: bool=True, configdb_telescope: str=None, configdb_enclosure: str=None, + calendar_update_url: str=None, + url_proj: str=None ): """ A class that manages the schedule for the night. This class is responsible for getting the schedule from the site proxy and the PTR calendar and keeping track of the events that are happening. 
@@ -62,6 +64,9 @@ def __init__(self, self._ptr_events = [] self._lco_events = [] self._time_of_last_lco_schedule = None + + self.calendar_update_url = calendar_update_url + self.url_proj = url_proj self._completed_ids = [] @@ -149,7 +154,7 @@ def update_ptr_schedule(self, start_time=None, end_time=None): - end_time (str): get events ending before this time. string formatted as YYYY-mm-ddTHH:MM:SSZ """ - calendar_update_url = "https://calendar.photonranch.org/calendar/siteevents" + #calendar_update_url = "https://calendar.photonranch.org/calendar/siteevents" if start_time is None or not is_valid_utc_iso(start_time): start_time = datetime.fromtimestamp(self.schedule_start).isoformat().split(".")[0] + "Z" @@ -170,10 +175,11 @@ def update_ptr_schedule(self, start_time=None, end_time=None): }) try: - events = requests.post(calendar_update_url, body, timeout=20).json() + events = requests.post(self.calendar_update_url, body, timeout=20).json() except: - plog(f"ERROR: Failed to update the calendar. This is not normal. Request url was {calendar_update_url} and body was {body}.") + plog(f"ERROR: Failed to update the calendar. This is not normal. 
Request url was {self.calendar_update_url} and body was {body}.") events = [] + print (events) with self._lock: self._ptr_events = [ @@ -196,12 +202,19 @@ def get_ptr_project_details(self, event): if 'project_id' not in event or event['project_id'] in null_project_ids: return None - url_proj = "https://projects.photonranch.org/projects/get-project" - request_body = json.dumps({ - "project_name": event['project_id'].split('#')[0], - "created_at": event['project_id'].split('#')[1], - }) - response = requests.post(url_proj, request_body, timeout=10) + #url_proj = "https://projects.photonranch.org/projects/get-project" + + #breakpoint() + try: + request_body = json.dumps({ + "project_name": event['project_id'].split('#')[0], + "created_at": event['project_id'].split('#')[1], + }) + except: + + request_body = json.dumps({"project_name": event['project_id']}) + + response = requests.post(self.url_proj, request_body, timeout=10) if response.status_code == 200: project = response.json() diff --git a/devices/sequencer.py b/devices/sequencer.py index b09d705d3..051cd21c7 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -156,6 +156,7 @@ def __init__(self, observatory: 'Observatory'): include_lco_scheduler = 'SITE_PROXY_TOKEN' in os.environ configdb_telescope = self.config.get('configdb_telescope') configdb_enclosure = self.config.get('configdb_enclosure') + #breakpoint() self.schedule_manager = NightlyScheduleManager( self.config['obs_id'], schedule_start, @@ -164,6 +165,8 @@ def __init__(self, observatory: 'Observatory'): include_lco_scheduler=include_lco_scheduler, configdb_telescope=configdb_telescope, configdb_enclosure=configdb_enclosure, + calendar_update_url=self.config['calendar_update_url'], + url_proj=self.config['url_proj'] ) # Add a fake lco observation to the calendar, used for testing @@ -883,6 +886,8 @@ def manager(self): try: # Get the observation to run now (or None) current_observation = self.schedule_manager.get_observation_to_run() + + print 
(current_observation) # Nothing to observe if current_observation is None: diff --git a/obs.py b/obs.py index f45cee6b1..d659c73e6 100644 --- a/obs.py +++ b/obs.py @@ -4493,12 +4493,16 @@ def fast_to_aws(self): timeout=10, ) except: - reqs.post( - aws_resp["url"], - #data=aws_resp["fields"], - files=files, - timeout=10, - ) + try: + reqs.post( + aws_resp["url"], + #data=aws_resp["fields"], + files=files, + timeout=10, + ) + except: + plog.err((traceback.format_exc())) + breakpoint() except Exception as e: plog.err((traceback.format_exc())) if ( From b47ca4885546ed809adf769274e7272f93723cd9 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Fri, 4 Jul 2025 05:43:42 +0000 Subject: [PATCH 28/30] projects working. --- configs/eco1/obs_config.py | 2 +- devices/schedule_manager.py | 5 +++ devices/sequencer.py | 9 +++++- obs.py | 63 +++++++++++++++++++++++++++---------- 4 files changed, 61 insertions(+), 18 deletions(-) diff --git a/configs/eco1/obs_config.py b/configs/eco1/obs_config.py index 712913eee..41e2f6fad 100644 --- a/configs/eco1/obs_config.py +++ b/configs/eco1/obs_config.py @@ -44,7 +44,7 @@ 'logs_http_base' : 'https://hub.nextastro.org/logs/', 'status_http_base' : 'https://hub.nextastro.org/status/', 'calendar_update_url' : 'https://hub.nextastro.org/calendar/siteevents', - 'url_proj': "https://hub.nextastro.org/get-project", + 'url_proj': "https://hub.nextastro.org/projects/get-project", # Name, local and owner stuff 'name': 'Eltham College Observatory, 0m4f6.8', diff --git a/devices/schedule_manager.py b/devices/schedule_manager.py index 83233063e..d31245892 100644 --- a/devices/schedule_manager.py +++ b/devices/schedule_manager.py @@ -222,6 +222,7 @@ def get_ptr_project_details(self, event): else: plog('Failed to retrieve project details for event: ', event) + print (response) return None def update_now(self, override_warning=False): @@ -374,6 +375,10 @@ def get_observation_to_run(self, unix_time=None): # a project associated with it else: project = 
self.get_ptr_project_details(event) + + # print ("GOOGLGOE") + # breakpoint() + if project: return { **event, diff --git a/devices/sequencer.py b/devices/sequencer.py index 2bcc9c5d7..bed31f34f 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -887,8 +887,12 @@ def manager(self): # Get the observation to run now (or None) current_observation = self.schedule_manager.get_observation_to_run() + + print (current_observation) + + # Nothing to observe if current_observation is None: current_observation=None @@ -1328,7 +1332,7 @@ def execute_project_from_lco(self, block_specification): except Exception as e: plog ("Could not execute project due to poorly formatted or corrupt project") plog (e) - g_dev['obs'].send_to_user("Could not execute project due to poorly formatted or corrupt project", p_level='INFO') + #g_dev['obs'].send_to_user("Could not execute project due to poorly formatted or corrupt project", p_level='INFO') self.blockend = None continue @@ -1642,6 +1646,8 @@ def execute_block(self, block_specification): g_dev['obs'].send_to_user("Could not execute project due to poorly formatted or corrupt project", p_level='INFO') self.blockend = None continue + + #breakpoint() # Store this ra as the "block" ra for centering purposes self.block_ra=copy.deepcopy(dest_ra) @@ -1706,6 +1712,7 @@ def execute_block(self, block_specification): #Compute how many to do. 
left_to_do = 0 ended = False + #breakpoint() for exposure in block['project']['exposures']: exposure['substack'] = do_sub_stack diff --git a/obs.py b/obs.py index d659c73e6..5714bd0d6 100644 --- a/obs.py +++ b/obs.py @@ -1022,21 +1022,53 @@ def get_wema_config(self): """ Fetch the WEMA config from AWS """ wema_config = None url = self.api_http_base + f"{self.wema_name}/config/" - try: - response = requests.get(url, timeout=20) - data = response.json() - # if the top‐level JSON has a 'wema_name' key, use the whole dict, - # otherwise pull out the 'configuration' sub-dict - if 'wema_name' in data: - wema_config = data - else: - wema_config = data.get('configuration', {}) - wema_last_recorded_day_dir = wema_config.get('events', {}).get('day_directory', '') - plog(f"Retrieved wema config, latest version is from day_directory {wema_last_recorded_day_dir}") - except Exception as e: - plog(traceback.format_exc()) - breakpoint() - plog.warn("WARNING: failed to get wema config!", e) + # try: + # response = requests.get(url, timeout=20) + # data = response.json() + # # if the top‐level JSON has a 'wema_name' key, use the whole dict, + # # otherwise pull out the 'configuration' sub-dict + # if 'wema_name' in data: + # wema_config = data + # else: + # wema_config = data.get('configuration', {}) + # wema_last_recorded_day_dir = wema_config.get('events', {}).get('day_directory', '') + # plog(f"Retrieved wema config, latest version is from day_directory {wema_last_recorded_day_dir}") + # except Exception as e: + # plog(traceback.format_exc()) + # breakpoint() + # plog.warn("WARNING: failed to get wema config!", e) + + + """ + Continuously fetch the WEMA configuration every 30 seconds. + On success, logs the retrieved day_directory; on failure, logs the traceback. 
+ """ + while True: + try: + response = requests.get(url, timeout=20) + response.raise_for_status() + data = response.json() + + # if the top-level JSON has a 'wema_name' key, use the whole dict, + # otherwise pull out the 'configuration' sub-dict + if 'wema_name' in data: + wema_config = data + else: + wema_config = data.get('configuration', {}) + + wema_last_recorded_day_dir = wema_config.get('events', {}) \ + .get('day_directory', '') + plog(f"Retrieved wema config, latest version is from day_directory {wema_last_recorded_day_dir}") + break + except Exception as e: + plog(traceback.format_exc()) + # If you want to drop into the debugger on failure: + # breakpoint() + plog.warn("WARNING: failed to get wema config!", e) + + # wait 30 seconds before next attempt (whether success or failure) + time.sleep(30) + return wema_config @@ -1059,7 +1091,6 @@ def update_config(self): retryapi = False except: plog(traceback.format_exc()) - breakpoint() plog.warn("connection glitch in update config. 
Waiting 5 seconds.") time.sleep(5) if "message" in response: From 85934ca32f7270ae3c19f2b2ee3a2af4a42bab05 Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Sat, 5 Jul 2025 12:17:47 +0000 Subject: [PATCH 29/30] projects reporting properly --- devices/camera.py | 2 +- devices/schedule_manager.py | 2 +- devices/sequencer.py | 20 +++++++++++++++++--- subprocesses/post_exposure_subprocess.py | 6 ++++-- 4 files changed, 23 insertions(+), 7 deletions(-) diff --git a/devices/camera.py b/devices/camera.py index 06736fafb..020e2c36c 100644 --- a/devices/camera.py +++ b/devices/camera.py @@ -5250,7 +5250,7 @@ def clean_object_name(name): outputimg = self._getImageArray() # .astype(np.float32) imageCollected = 1 - if True: + if False: height, width = outputimg.shape patch = outputimg[int(0.4*height):int(0.6*height), int(0.4*width):int(0.6*width)] plog(">>>> 20% central image patch, std: ", bn.nanmedian(patch), round(bn.nanstd(patch), 2), str(width)+'x'+str(height) ) diff --git a/devices/schedule_manager.py b/devices/schedule_manager.py index d31245892..732995b13 100644 --- a/devices/schedule_manager.py +++ b/devices/schedule_manager.py @@ -179,7 +179,7 @@ def update_ptr_schedule(self, start_time=None, end_time=None): except: plog(f"ERROR: Failed to update the calendar. This is not normal. 
Request url was {self.calendar_update_url} and body was {body}.") events = [] - print (events) + #print (events) with self._lock: self._ptr_events = [ diff --git a/devices/sequencer.py b/devices/sequencer.py index bed31f34f..cc0ff8e75 100644 --- a/devices/sequencer.py +++ b/devices/sequencer.py @@ -1697,7 +1697,12 @@ def execute_block(self, block_specification): self.auto_focus_script(req2, {}, throw = g_dev['foc'].throw) g_dev["foc"].focus_needed = False - pa = float(block_specification['project']['project_constraints']['position_angle']) + try: + pa = float(block_specification['project'] + ['project_constraints'] + ['position_angle']) + except (KeyError, TypeError, ValueError): + pa = 0.0 if abs(pa) > 0.01: try: g_dev['rot'].rotator.MoveAbsolute(pa) #Skip rotator move if nominally 0 @@ -1855,7 +1860,11 @@ def execute_block(self, block_specification): except: repeat_count = 1 # We should add a frame repeat count - imtype = exposure['imtype'] + try: + imtype = exposure['imtype'] + except: + imtype = 'light' + # MUCH safer to calculate these from first principles # Than rely on an owner getting this right! 
@@ -1884,7 +1893,12 @@ def execute_block(self, block_specification): except: pass - zoom_factor = exposure['zoom'].lower() + try: + zoom_factor = exposure['zoom'].lower() + except: + exposure['zoom'] = 'full' + zoom_factor = 'full' + if exposure['zoom'].lower() in ["full", 'Full'] or 'X' in exposure['zoom'] \ or '%' in exposure['zoom'] or ( exposure['zoom'].lower() == 'small sq.') \ or (exposure['zoom'].lower() == 'small sq'): diff --git a/subprocesses/post_exposure_subprocess.py b/subprocesses/post_exposure_subprocess.py index 3a11b4878..52a432005 100644 --- a/subprocesses/post_exposure_subprocess.py +++ b/subprocesses/post_exposure_subprocess.py @@ -1564,11 +1564,13 @@ def timestamp_to_LCO_datestring(t): hdu.header["PEDESTAL"] = (0.0, "This value has been added to the data") hdu.header["ERRORVAL"] = 0 - hdu.header["USERNAME"] = observer_user_name + #hdu.header["USERNAME"] = observer_user_name hdu.header["USERID"] = ( str(observer_user_id).replace("-", "").replace("|", "").replace('@','at') ) - + hdu.header["USERNAME"] = ( + str(observer_user_id).replace("-", "").replace("|", "").replace('@','at') + ) im_type = "EX" f_ext = "" From e866ae2bd022bb752cb239e93b2173d0a5e6ce1d Mon Sep 17 00:00:00 2001 From: mfitzasp Date: Sun, 6 Jul 2025 06:06:06 +1000 Subject: [PATCH 30/30] Update photometry_process.py --- subprocesses/photometry_process.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/subprocesses/photometry_process.py b/subprocesses/photometry_process.py index 241048a07..9e5ffa7af 100644 --- a/subprocesses/photometry_process.py +++ b/subprocesses/photometry_process.py @@ -617,8 +617,10 @@ def localMax(a, include_diagonal=True, threshold=-np.inf) : try: hduheader["SEPSKY"] = sepsky + hduheader["SKYLEVEL"] = sepsky except: hduheader["SEPSKY"] = -9999 + hduheader["SKYLEVEL"] = -9999 try: hduheader["FWHM"] = (str(rfp), 'FWHM in pixels') hduheader["FWHMpix"] = (str(rfp), 'FWHM in pixels')