From 6d1520307eb53a6f2226e1262d6e55cf12df1b30 Mon Sep 17 00:00:00 2001
From: erangi-ar <111747955+erangi-ar@users.noreply.github.com>
Date: Fri, 16 May 2025 16:23:01 +0530
Subject: [PATCH 001/195] remove 34 data files-temp

---
 ...miseks_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...nikule_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...3tjale_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...3tjale_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...miseks_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...miseks_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...miseks_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...03igus_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...miseks_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...miseks_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...mesele_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...mesele_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...mesele_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...mesele_-_Politsei-_ja_Piirivalveamet.json" | 1 -
 ...alasele_-_Politsei-_ja_Piirivalveamet.txt" | 9 -----
 ...umiseks_-_Politsei-_ja_Piirivalveamet.txt" | 8 ----
 ...anikule_-_Politsei-_ja_Piirivalveamet.txt" | 9 -----
 ...03tjale_-_Politsei-_ja_Piirivalveamet.txt" | 9 -----
 ...03tjale_-_Politsei-_ja_Piirivalveamet.txt" | 9 -----
 ...storile_-_Politsei-_ja_Piirivalveamet.txt" | 9 -----
 ...alasele_-_Politsei-_ja_Piirivalveamet.txt" | 7 ----
 ...umiseks_-_Politsei-_ja_Piirivalveamet.txt" | 8 ----
 ...umiseks_-_Politsei-_ja_Piirivalveamet.txt" | 7 ----
 ...umiseks_-_Politsei-_ja_Piirivalveamet.txt" | 7 ----
 ...203igus_-_Politsei-_ja_Piirivalveamet.txt" | 8 ----
 ...amiseks_-_Politsei-_ja_Piirivalveamet.txt" | 8 ----
 ...umiseks_-_Politsei-_ja_Piirivalveamet.txt" | 25 ------------
 ...umiseks_-_Politsei-_ja_Piirivalveamet.txt" | 25 ------------
 ...imesele_-_Politsei-_ja_Piirivalveamet.txt" | 4 --
 ...imesele_-_Politsei-_ja_Piirivalveamet.txt" | 39 ------------------
 ...imesele_-_Politsei-_ja_Piirivalveamet.txt" | 11 -----
 ...imesele_-_Politsei-_ja_Piirivalveamet.txt" | 40 -------------------
 ...paaniad_-_Politsei-_ja_Piirivalveamet.txt" | 8 ----
 ...se_load_-_Politsei-_ja_Piirivalveamet.txt" | 15 -------
 34 files changed, 279 deletions(-)
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_alaealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_a\303\214\302\210riu\303\214\302\210hingu_osanikule_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_fu\303\214\302\210u\303\214\302\210silisest_isikust_ettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_iduettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_pereliikmele_abikaasa_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Perekonnaliikme_ta\303\214\302\210htajaline_elamiso\303\214\302\203igus_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Ta\303\214\302\210htajaline_elamisluba_pu\303\214\302\210sivalt_elamiseks_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumis-_ja_allkirjavo\303\214\302\203imetule_inimese_dokumendi_ka\303\214\302\210ttesaamine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_ID-kaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_PIN-koodide_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_elamisloakaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Eestis_to\303\214\302\210o\303\214\302\210tamise_info_va\303\214\302\210lismaalasele_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_alaealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_a\303\214\302\210riu\303\214\302\210hingu_osanikule_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_fu\303\214\302\210u\303\214\302\210silisest_isikust_ettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_iduettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_suurinvestorile_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_o\303\214\302\203ppimiseks_va\303\214\302\210lismaalasele_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_pereliikmele_abikaasa_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Perekonnaliikme_ta\303\214\302\210htajaline_elamiso\303\214\302\203igus_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Ta\303\214\302\210htajaline_elamisluba_pu\303\214\302\210sivalt_elamiseks_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumis-_ja_allkirjavo\303\214\302\203imetule_inimese_dokumendi_ka\303\214\302\210ttesaamine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_ID-kaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_PIN-koodide_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_elamisloakaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Projekt_\303\242\302\200\302\236Alaealiste_erikohtlemise_su\303\214\302\210steemi_loomine\303\242\302\200\302\234_-_Ennetusprojektid_ja_kampaaniad_-_Politsei-_ja_Piirivalveamet.txt"
 delete mode 100644 "src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/So\303\214\302\203jarelvade,_laskemoona_ja_lahingumoona_ka\303\214\302\210itlemise_tegevusluba_-_Majandustegevuse_load_-_Politsei-_ja_Piirivalveamet.txt"

diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_alaealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_alaealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index 7d1fff6d..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_alaealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nKui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks.\nKui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\nLoe Eestis viibimise seaduslike aluste kohta.\n\n\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - Elamisluba alaealisele lapsele pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet", "description": "Mis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Mis saab siis, kui elamisluba tunnistatakse kehtetuks?. Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. Loe Eestis viibimise seaduslike aluste kohta.  . Migratsioonin\u00f5ustaja...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-alaealisele-lapsele-pereliikme-juurde-elama-asumiseks/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_a\303\214\302\210riu\303\214\302\210hingu_osanikule_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_a\303\214\302\210riu\303\214\302\210hingu_osanikule_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index 7d71bde4..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_a\303\214\302\210riu\303\214\302\210hingu_osanikule_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nMis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?\nKui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada.\nMis saab siis, kui elamisluba tunnistatakse kehtetuks?\nKui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\n\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - Elamisluba ettev\u00f5tluseks \u00e4ri\u00fchingu osanikule - Politsei- ja Piirivalveamet", "description": "Mis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada. Mis saab siis, kui elamisluba tunnistatakse kehtetuks?. Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. Migratsioonin\u00f5ustaja on sulle igal sammul...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-ettevotluseks-aeriuehingu-osanikule/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_fu\303\214\302\210u\303\214\302\210silisest_isikust_ettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_fu\303\214\302\210u\303\214\302\210silisest_isikust_ettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index 81defad3..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_fu\303\214\302\210u\303\214\302\210silisest_isikust_ettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nMis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?\nKui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada.\nMis saab siis, kui elamisluba tunnistatakse kehtetuks?\nKui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\n\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - Elamisluba ettev\u00f5tluseks f\u00fc\u00fcsilisest isikust ettev\u00f5tjale - Politsei- ja Piirivalveamet", "description": "Mis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada. Mis saab siis, kui elamisluba tunnistatakse kehtetuks?. Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. Migratsioonin\u00f5ustaja on sulle igal sammul...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-ettevotluseks-fueuesilisest-isikust-ettevotjale/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_iduettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_iduettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index c2d80095..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_iduettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nMis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?\nKui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada.\nMis saab siis, kui elamisluba tunnistatakse kehtetuks?\nKui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\n\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - Elamisluba ettev\u00f5tluseks iduettev\u00f5tjale - Politsei- ja Piirivalveamet", "description": ". Mis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada. Mis saab siis, kui elamisluba tunnistatakse kehtetuks?. Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. Migratsioonin\u00f5ustaja on sulle igal samm...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-ettevotluseks-iduettevotjale/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_pereliikmele_abikaasa_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_pereliikmele_abikaasa_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index ccf44157..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_pereliikmele_abikaasa_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nKui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks.\nKui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\nLoe Eestis viibimise seaduslike aluste kohta.\n\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - Elamisluba pereliikmele abikaasa juurde elama asumiseks - Politsei- ja Piirivalveamet", "description": "Mis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Mis saab siis, kui elamisluba tunnistatakse kehtetuks?. Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. Loe Eestis viibimise seaduslike aluste kohta.  . Migratsioonin\u00f5ustaja...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-pereliikmele-abikaasa-juurde-elama-asumiseks/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index ddb3e9d9..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nKui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 270 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada.\nKui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\nL\u00e4hemalt saad Eestis viibimise seaduslike aluste kohta lugeda siit.\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - Elamisluba t\u00e4isealisele lapsele pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet", "description": ". Mis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 270 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada. Mis saab siis, kui elamisluba tunnistatakse kehtetuks?. Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. L\u00e4hemalt saad Eestis viibimise seadusl...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-taeisealisele-lapsele-pereliikme-juurde-elama-asumiseks/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index 58383aea..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nKui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 270 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada.\nKui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\nL\u00e4hemalt saad Eestis viibimise seaduslike aluste kohta lugeda siit.\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - Elamisluba vanemale v\u00f5i vanavanemale pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet", "description": ". Mis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 270 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada. Mis saab siis, kui elamisluba tunnistatakse kehtetuks?. Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. L\u00e4hemalt saad Eestis viibimise seadusl...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-vanemale-voi-vanavanemale-pereliikme-juurde-elama-asumiseks/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Perekonnaliikme_ta\303\214\302\210htajaline_elamiso\303\214\302\203igus_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Perekonnaliikme_ta\303\214\302\210htajaline_elamiso\303\214\302\203igus_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index e00b9452..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Perekonnaliikme_ta\303\214\302\210htajaline_elamiso\303\214\302\203igus_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nMis saab siis, kui elamis\u00f5igus l\u00f5ppeb v\u00f5i seda ei pikendata?\nKui sinu elamis\u00f5igus l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada.\nMis saab siis, kui elamis\u00f5igus tunnistatakse kehtetuks?\nKui elamis\u00f5igus tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt. (https://www.politsei.ee/et/migratsiooninoustajad)", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - Perekonnaliikme t\u00e4htajaline elamis\u00f5igus - Politsei- ja Piirivalveamet", "description": "Mis saab siis, kui elamis\u00f5igus l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamis\u00f5igus l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Sel ajal v\u00f5id Eestis ka t\u00f6\u00f6tada. Mis saab siis, kui elamis\u00f5igus tunnistatakse kehtetuks?. Kui elamis\u00f5igus tunnistatakse kehtetuks, pead kohe Eestist lahkuma. Migratsioonin\u00f5ustaja on sulle igal sa...", "source_url": "https://www.politsei.ee/et/juhend/perekonnaliikme-taehtajaline-elamisoigus/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Ta\303\214\302\210htajaline_elamisluba_pu\303\214\302\210sivalt_elamiseks_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Ta\303\214\302\210htajaline_elamisluba_pu\303\214\302\210sivalt_elamiseks_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index 67275862..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Ta\303\214\302\210htajaline_elamisluba_pu\303\214\302\210sivalt_elamiseks_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine\nKui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks.\nKui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.\nLoe Eestis viibimise seaduslike aluste kohta.\n\nMigratsioonin\u00f5ustaja on sulle igal sammul abiks. K\u00fcsi migratsioonin\u00f5ustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .", "imgurl": "", "title": "Elamisloa l\u00f5ppemine v\u00f5i kehtetuks tunnistamine - T\u00e4htajaline elamisluba p\u00fcsivalt elamiseks - Politsei- ja Piirivalveamet", "description": ". Mis saab siis, kui elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata?. Kui sinu elamisluba l\u00f5ppeb v\u00f5i seda ei pikendata, v\u00f5id Eestisse j\u00e4\u00e4da veel kuni 90 p\u00e4evaks. Mis saab siis, kui elamisluba tunnistatakse kehtetuks?. Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. Loe Eestis viibimise seaduslike aluste kohta.  . Migratsioonin\u00f5usta...", "source_url": "https://www.politsei.ee/et/juhend/taehtajaline-elamisluba-puesivalt-elamiseks/elamisloa-loppemine-voi-kehtetuks-tunnistamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index b54bce34..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Elamisloa pikendamine\nKuidas pikendada elamisluba?\n\n- Taotle elamisloa pikendamist v\u00e4hemalt 2 kuud enne elamisloa l\u00f5ppemist.\n- Esita n\u00f5utud dokumendid ja tasu riigil\u00f5iv.\n- \u00d5igeaegselt esitatud taotlusele saad vastuse hiljemalt 10 p\u00e4eva enne kehtiva elamisloa l\u00f5ppemist.\nKui andsid s\u00f5rmej\u00e4ljed viimase 6 aasta jooksul ja sul on kehtiv elamisloakaart, saad elamisluba pikendada ka e-posti teel.\nKuidas taotleda elamisloa pikendamist Politsei- ja Piirivalveameti teeninduses?\nKui viibid Eestis, esita elamisloa taotlus Politsei- ja Piirivalveameti teeninduses. Selleks broneeri aeg veebis.\nMillised dokumendid pead esitama elamisloa pikendamiseks?\nKui taotled elamisloa pikendamist, esita:", "imgurl": "", "title": "Elamisloa pikendamine - Elamisluba vanemale v\u00f5i vanavanemale pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet", "description": "Kuidas pikendada elamisluba?. Taotle elamisloa pikendamist v\u00e4hemalt 2 kuud enne elamisloa l\u00f5ppemist. Esita n\u00f5utud dokumendid ja tasu riigil\u00f5iv. \u00d5igeaegselt esitatud taotlusele saad vastuse hiljemalt 10 p\u00e4eva enne kehtiva elamisloa l\u00f5ppemist. Kui andsid s\u00f5rmej\u00e4ljed viimase 6 aasta jooksul ja sul on kehtiv elamisloakaart, saad elamisluba pikendada ka...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-vanemale-voi-vanavanemale-pereliikme-juurde-elama-asumiseks/elamisloa-pikendamine"}}, {"content": {"chunk": "- t\u00e4htajalise elamisloa pikendamise taotlus (https://www.politsei.ee/files/dokumendid/ankeedid/2023_03/tep-taotlus-est-08.02.2024.pdf?f4680da745)\n- andmed l\u00e4hedaste sugulaste ja perekonnaliikmete kohta ja taotlemisel n\u00f5utud dokumendid (https://www.politsei.ee/files/dokumendid/ankeedid/2023/pereliikmed-est-27.12.2023.pdf?d1f55577c5) vaid sel juhul, kui andmed on muutunud\n- Lapse v\u00f5i lapselapse kutse (/files/dokumendid/ankeedid/2023_4/esttaisealiselapsevoilapselapsekutse-est.pdf?17ce73ac1c) , mille t\u00e4idab ja allkirjastab laps v\u00f5i lapselaps v\u00f5i kirjalik kinnitus, et andmed ei ole muutunud\n- eluloolised andmed (https://www.politsei.ee/files/dokumendid/ankeedid/2023/eluloolised-andmed-est-2023-03-18.pdf?3d91c6b52d)\n- lisaankeet (https://www.politsei.ee/files/dokumendid/ankeedid/2023/lisaankeet-1-est-2023-03-17.pdf?0f03b87480)\n- isikut t\u00f5endav dokument\n- digitaalne foto (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised)\n- riigil\u00f5ivu tasumist t\u00f5endav dokument (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad/elamisluba-ja-elamisoigus) , n\u00e4iteks maksekorraldus.\nMenetleja v\u00f5ib vajadusel paluda lisaandmeid v\u00f5i -dokumente.\nLoe v\u00e4lisriigis v\u00e4lja antud dokumentide n\u00f5uete kohta. (http://vm.ee/et/avaliku-dokumendi-legaliseerimine)\nKui kiiresti otsustatakse elamisloa pikendamine?\nOtsus elamisloa pikendamise v\u00f5i sellest keeldumise kohta tehakse hiljemalt 10 p\u00e4eva enne sinu elamisloa kehtivusaja l\u00f5ppemist, kui esitasid pikendamise taotluse 2 kuud enne elamisloa kehtivuse l\u00f5ppemist.", "imgurl": "", "title": "Elamisloa pikendamine - Elamisluba vanemale v\u00f5i vanavanemale pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet", "description": "Kuidas pikendada elamisluba?. Taotle elamisloa pikendamist v\u00e4hemalt 2 kuud enne elamisloa l\u00f5ppemist. Esita n\u00f5utud dokumendid ja tasu riigil\u00f5iv. \u00d5igeaegselt esitatud taotlusele saad vastuse hiljemalt 10 p\u00e4eva enne kehtiva elamisloa l\u00f5ppemist. Kui andsid s\u00f5rmej\u00e4ljed viimase 6 aasta jooksul ja sul on kehtiv elamisloakaart, saad elamisluba pikendada ka...", "source_url": "https://www.politsei.ee/et/juhend/elamisluba-vanemale-voi-vanavanemale-pereliikme-juurde-elama-asumiseks/elamisloa-pikendamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumis-_ja_allkirjavo\303\214\302\203imetule_inimese_dokumendi_ka\303\214\302\210ttesaamine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumis-_ja_allkirjavo\303\214\302\203imetule_inimese_dokumendi_ka\303\214\302\210ttesaamine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index f537552a..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumis-_ja_allkirjavo\303\214\302\203imetule_inimese_dokumendi_ka\303\214\302\210ttesaamine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Liikumis- ja allkirjav\u00f5imetule inimese dokumendi k\u00e4ttesaamine\nKui dokument on valmis, siis v\u00f5tame taotleja kontaktisikuga \u00fchendust ja lepime kokku dokumendi k\u00e4ttesaamise aja ja koha.", "imgurl": "", "title": "Liikumis- ja allkirjav\u00f5imetule inimese dokumendi k\u00e4ttesaamine - Isikut t\u00f5endava dokumendi taotlemine liikumisv\u00f5imetule inimesele - Politsei- ja Piirivalveamet", "description": "Kui dokument on valmis, siis v\u00f5tame taotleja kontaktisikuga \u00fchendust ja lepime kokku dokumendi k\u00e4ttesaamise aja ja koha.", "source_url": "https://www.politsei.ee/et/juhend/isikut-toendava-dokumendi-taotlemine-liikumisvoimetule-inimesele/liikumis-ja-allkirjavoimetule-inimese-dokumendi-kattesaamine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_ID-kaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_ID-kaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index 7b8ac9ef..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_ID-kaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Liikumisv\u00f5imetule inimesele ID-kaardi taotlemine\nS\u00f5rmej\u00e4lgede andmine\nAlates 12. eluaastast on ID-kaardi taotlemiseks vaja anda s\u00f5rmej\u00e4ljed. Elamisloakaardi puhul peab s\u00f5rmej\u00e4ljed andma alates 6. eluaastast.\nJuhul, kui ID-kaardi v\u00f5i pikaajalise elaniku elamisloakaardi taotleja on vanem kui 70-aastane ning temalt on varasemalt v\u00f5etud s\u00f5rmej\u00e4ljed, saab uut dokumenti taotleda ilma uusi s\u00f5rmej\u00e4lgi andmata.\nMuudel juhtudel, kui liikumisv\u00f5imetu taotleja puhul on s\u00f5rmej\u00e4ljed h\u00f5ivatud v\u00e4hem kui 11 aastat tagasi, saab ID-kaarti v\u00f5i elamisloakaarti taotleda ilma uusi s\u00f5rmej\u00e4lgi andmata. Dokumendi taotlemiseks esita PPA-le koos dokumenditaotlusega taotleja terviseseisundit t\u00f5endava dokument (arstit\u00f5end).\nKui s\u00f5rmej\u00e4lgede andmine on vajalik, kuid dokumenditaotleja ei saa teenindusse s\u00f5rmej\u00e4lgede andmiseks tulla, siis esita PPA-le koos dokumenditaotluse ja taotleja terviseseisundit t\u00f5endava dokumendiga (arstit\u00f5end) avaldus s\u00f5rmej\u00e4lgede h\u00f5ivamiseks taotleja viibimiskohas.\nTaotlemine iseteeninduse\nIseteeninduses saab taotleda:\n\n- korduvat ID-kaarti t\u00e4iskasvanule\n- ID-kaarti alaealisele lapsele\nIseteeninduses saab dokumenti taotleda ID-kaardi, mobiil-ID, Smart-ID abil.\nIseteeninduses pead \u00fcles laadima foto ning tasuma riigil\u00f5ivu pangalingi kaudu.\nTaotle ID-kaarti iseteeninduses. (https://etaotlus.politsei.ee/#/login)\nTaotlemine posti teel\nID-kaardi taotlemiseks posti teel:", "imgurl": "", "title": "Liikumisv\u00f5imetule inimesele ID-kaardi taotlemine - Isikut t\u00f5endava dokumendi taotlemine liikumisv\u00f5imetule inimesele - Politsei- ja Piirivalveamet", "description": "S\u00f5rmej\u00e4lgede andmine. Alates 12. eluaastast on ID-kaardi taotlemiseks vaja anda s\u00f5rmej\u00e4ljed. Elamisloakaardi puhul peab s\u00f5rmej\u00e4ljed andma alates 6. eluaastast. Juhul, kui ID-kaardi v\u00f5i pikaajalise elaniku elamisloakaardi taotleja on vanem kui 70-aastane ning temalt on varasemalt v\u00f5etud s\u00f5rmej\u00e4ljed, saab uut dokumenti taotleda ilma uusi s\u00f5rmej\u00e4lgi a...", "source_url": "https://www.politsei.ee/et/juhend/isikut-toendava-dokumendi-taotlemine-liikumisvoimetule-inimesele/liikumisvoimetule-inimesele-id-kaardi-taotlemine"}}, {"content": {"chunk": "- T\u00e4ida taotlusankeet (https://www.politsei.ee/files/Dokumentide taotlemise ankeedid/isikut-t-endavate-dokumentide-taotlusankeet-est-2019-01-14.pdf?c39fc91996)\n- Tasu riigil\u00f5iv (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad)\n- Tee digitaalne dokumendifoto (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised)\nPostiga saada t\u00e4idetud taotlusankeet ja riigil\u00f5ivu tasumist t\u00f5endav dokument aadressil P\u00e4rnu mnt 139, 15060 Tallinn.\nDigitaalne dokumendifoto saada koos infoga fotol olevast inimesest (nimi, perekonnanimi, isikukood v\u00f5i s\u00fcnniaeg) aadressil .\nTaotlusankeet\nPosti teel ID-kaardi taotlemiseks tuleb liikumisv\u00f5imetul inimesel t\u00e4ita ja allkirjastada ankeet.\nID-kaardi taotlusankeet. (https://www.politsei.ee/files/Dokumentide taotlemise ankeedid/isikut-t-endavate-dokumentide-taotlusankeet-est-2019-01-14.pdf?c39fc91996)\nTaotlusankeedi t\u00e4itmise juhend. (https://www.politsei.ee/et/taotlusankeedi-taeitmise-juhend)\nRiigil\u00f5ivu tasumine\nTasu riigil\u00f5iv ning saada seda t\u00f5endav dokument koos taotlusankeediga.\nVaata riigil\u00f5ivu m\u00e4\u00e4rasid. (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad)\nDokumendifoto\nDigitaalne dokumendifoto saada koos infoga fotol olevast inimesest (nimi, perekonnanimi, isikukood v\u00f5i s\u00fcnniaeg) aadressil .\nVaata dokumendifoto n\u00f5udeid ja soovitusi. (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised)\nKui ID-kaarti aitab taotleda sotsiaalt\u00f6\u00f6taja\nLisaks isikut t\u00f5endava dokumendi taotlusankeedile peab sotsiaalt\u00f6\u00f6taja esitama:", "imgurl": "", "title": "Liikumisv\u00f5imetule inimesele ID-kaardi taotlemine - Isikut t\u00f5endava dokumendi taotlemine liikumisv\u00f5imetule inimesele - Politsei- ja Piirivalveamet", "description": "S\u00f5rmej\u00e4lgede andmine. Alates 12. eluaastast on ID-kaardi taotlemiseks vaja anda s\u00f5rmej\u00e4ljed. Elamisloakaardi puhul peab s\u00f5rmej\u00e4ljed andma alates 6. eluaastast. Juhul, kui ID-kaardi v\u00f5i pikaajalise elaniku elamisloakaardi taotleja on vanem kui 70-aastane ning temalt on varasemalt v\u00f5etud s\u00f5rmej\u00e4ljed, saab uut dokumenti taotleda ilma uusi s\u00f5rmej\u00e4lgi a...", "source_url": "https://www.politsei.ee/et/juhend/isikut-toendava-dokumendi-taotlemine-liikumisvoimetule-inimesele/liikumisvoimetule-inimesele-id-kaardi-taotlemine"}}, {"content": {"chunk": "- taotleja kirjaliku n\u00f5usoleku, et sotsiaalt\u00f6\u00f6taja tema eest asju ajab\n- valla- v\u00f5i linnavalitsuse v\u00f5i hoolekandeasutuse juhi volikirja\n- linna- v\u00f5i vallavalitsuse v\u00f5i hoolekandeasutuse kinnituse, et taotleja terviseseisund ei v\u00f5imalda tal p\u00fcsivalt taotluse esitamiseks isiklikult p\u00f6\u00f6rduda PPA poole.\nK\u00f5ik dokumendi taotlemiseks vajalik saada e-postiga v\u00f5i postiga aadressile P\u00e4rnu mnt 139, 15060 Tallinn.", "imgurl": "", "title": "Liikumisv\u00f5imetule inimesele ID-kaardi taotlemine - Isikut t\u00f5endava dokumendi taotlemine liikumisv\u00f5imetule inimesele - Politsei- ja Piirivalveamet", "description": "S\u00f5rmej\u00e4lgede andmine. Alates 12. eluaastast on ID-kaardi taotlemiseks vaja anda s\u00f5rmej\u00e4ljed. Elamisloakaardi puhul peab s\u00f5rmej\u00e4ljed andma alates 6. eluaastast. Juhul, kui ID-kaardi v\u00f5i pikaajalise elaniku elamisloakaardi taotleja on vanem kui 70-aastane ning temalt on varasemalt v\u00f5etud s\u00f5rmej\u00e4ljed, saab uut dokumenti taotleda ilma uusi s\u00f5rmej\u00e4lgi a...", "source_url": "https://www.politsei.ee/et/juhend/isikut-toendava-dokumendi-taotlemine-liikumisvoimetule-inimesele/liikumisvoimetule-inimesele-id-kaardi-taotlemine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_PIN-koodide_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_PIN-koodide_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index 1c866b4c..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_PIN-koodide_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Liikumisv\u00f5imetule inimesele PIN-koodide taotlemine\nKui liikumisv\u00f5imetu inimene unustanud dokumendi (ID-kaart, elamisloakaart, digi-ID) PIN-koodid ja turvakoodide \u00fcmbrik on kadunud, siis selleks, et j\u00e4tkata e-teenuste kasutamist v\u00f5i anda digiallkirja, tuleb taotleda uus turvakoodide \u00fcmbrik.\nLiikumisv\u00f5imetul inimesel aitab uusi PIN-koode k\u00e4tte saada sotsiaalt\u00f6\u00f6taja.\nUute PIN-koodide saamiseks v\u00f5tab sotsiaalt\u00f6\u00f6taja teenindusse kaasa\n\n- liikumisv\u00f5imetu inimese kehtiva ID-kaardi/elamisloakaardi\n- taotleja kirjaliku n\u00f5usoleku, et sotsiaalt\u00f6\u00f6taja tema eest asju ajab\n- valla- v\u00f5i linnavalitsuse v\u00f5i hoolekandeasutuse juhi volikirja\n- linna- v\u00f5i vallavalitsuse v\u00f5i hoolekandeasutuse kinnituse, et inimese terviseseisund ei v\u00f5imalda tal p\u00fcsivalt taotluse esitamiseks isiklikult p\u00f6\u00f6rduda PPA poole.", "imgurl": "", "title": "Liikumisv\u00f5imetule inimesele PIN-koodide taotlemine - Isikut t\u00f5endava dokumendi taotlemine liikumisv\u00f5imetule inimesele - Politsei- ja Piirivalveamet", "description": "Kui liikumisv\u00f5imetu inimene unustanud dokumendi (ID-kaart, elamisloakaart, digi-ID) PIN-koodid ja turvakoodide \u00fcmbrik on kadunud, siis selleks, et j\u00e4tkata e-teenuste kasutamist v\u00f5i anda digiallkirja, tuleb taotleda uus turvakoodide \u00fcmbrik. Liikumisv\u00f5imetul inimesel aitab uusi PIN-koode k\u00e4tte saada sotsiaalt\u00f6\u00f6taja. Uute PIN-koodide saamiseks v\u00f5tab s...", "source_url": "https://www.politsei.ee/et/juhend/isikut-toendava-dokumendi-taotlemine-liikumisvoimetule-inimesele/liikumisvoimetule-inimesele-pin-koodide-taotlemine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_elamisloakaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json" "b/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_elamisloakaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
deleted file mode 100644
index 79d3cd07..00000000
--- "a/src/dataset-generation/data/Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_elamisloakaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.json"
+++ /dev/null
@@ -1 +0,0 @@
-[{"content": {"chunk": "Liikumisv\u00f5imetule inimesele elamisloakaardi taotlemine\nS\u00f5rmej\u00e4lgede andmine\nAlates kuuendast eluaastast on elamisloakaardi taotlemiseks vaja anda s\u00f5rmej\u00e4ljed.\nKui dokumenditaotleja ei saa teenindusse s\u00f5rmej\u00e4lgede andmiseks tulla, siis esita PPA-le sellekohane avaldus koos dokumenditaotlusega.\nS\u00f5rmej\u00e4lge ei pea andma, kui nende andmisest on m\u00f6\u00f6dunud v\u00e4hem kui kuus aastat.\nTaotlemine iseteeninduses\nIseteeninduses saab taotleda:\n\n- korduvat elamisloakaarti t\u00e4iskasvanule\n- elamisloakaarti alaealisele lapsele\nIseteeninduses saab dokumenti taotleda elamisloakaardi, mobiil-ID, Smart-ID v\u00f5i pangalingiga.\nIseteeninduses pead \u00fcles laadima foto ning tasuma riigil\u00f5ivu pangalingi kaudu. Samuti saab taaskasutada s\u00f5rmej\u00e4lgi, kui nende andmisest on m\u00f6\u00f6dunud v\u00e4hem kui kuus aastat.\nTaotle elamisloakaarti iseteeninduses. (https://etaotlus.politsei.ee/vid/login)\nTaotlemine posti teel\nPosti teel taotlemine on v\u00f5imalik, kui s\u00f5rmej\u00e4lgede andmisest on m\u00f6\u00f6dunud v\u00e4hem kui kuus aastat.\nElamisloakaardi taotlemiseks posti teel:", "imgurl": "", "title": "Liikumisv\u00f5imetule inimesele elamisloakaardi taotlemine - Isikut t\u00f5endava dokumendi taotlemine liikumisv\u00f5imetule inimesele - Politsei- ja Piirivalveamet", "description": "S\u00f5rmej\u00e4lgede andmine. Alates kuuendast eluaastast on elamisloakaardi taotlemiseks vaja anda s\u00f5rmej\u00e4ljed. Kui dokumenditaotleja ei saa teenindusse s\u00f5rmej\u00e4lgede andmiseks tulla, siis esita PPA-le sellekohane avaldus koos dokumenditaotlusega. S\u00f5rmej\u00e4lge ei pea andma, kui nende andmisest on m\u00f6\u00f6dunud v\u00e4hem kui kuus aastat. Taotlemine iseteeninduses. Ise...", "source_url": "https://www.politsei.ee/et/juhend/isikut-toendava-dokumendi-taotlemine-liikumisvoimetule-inimesele/liikumisvoimetule-inimesele-elamisloakaardi-taotlemine"}}, {"content": {"chunk": "- T\u00e4ida taotlusankeet (https://www.politsei.ee/files/Dokumentide taotlemise ankeedid/isikut-t-endavate-dokumentide-taotlusankeet-est-2019-01-14.pdf?c39fc91996)\n- Tasu riigil\u00f5iv (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad)\n- Tee digitaalne dokumendifoto (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised)\nPostiga saada t\u00e4idetud taotlusankeet ja riigil\u00f5ivu tasumist t\u00f5endav dokument aadressil P\u00e4rnu mnt 139, 15060 Tallinn.\nDigitaalne dokumendifoto saada koos infoga fotol olevast inimesest (nimi, perekonnanimi, isikukood v\u00f5i s\u00fcnniaeg) aadressil .\nTaotlusankeet\nPosti teel elamisloakaardi taotlemiseks tuleb liikumisv\u00f5imetul inimesel t\u00e4ita ja allkirjastada ankeet.\nElamisloakaardi taotlusankeet. (https://www.politsei.ee/files/Dokumentide taotlemise ankeedid/isikut-t-endavate-dokumentide-taotlusankeet-est-2019-01-14.pdf?c39fc91996)\nTaotlusankeedi t\u00e4itmise juhend. (https://www.politsei.ee/et/taotlusankeedi-taeitmise-juhend)\nRiigil\u00f5ivu tasumine\nTasu riigil\u00f5iv ning saada seda t\u00f5endav dokument koos taotlusankeediga.\nVaata riigil\u00f5ivu m\u00e4\u00e4rasid. (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad)\nDokumendifoto\nDigitaalne dokumendifoto saada koos infoga fotol olevast inimesest (nimi, perekonnanimi, isikukood v\u00f5i s\u00fcnniaeg) aadressil .\nVaata dokumendifoto n\u00f5udeid ja soovitusi. (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised)\nElamisloakaarti aitab taotleda sotsiaalt\u00f6\u00f6taja\nLisaks isikut t\u00f5endava dokumendi taotlusankeedile peab sotsiaalt\u00f6\u00f6taja esitama:", "imgurl": "", "title": "Liikumisv\u00f5imetule inimesele elamisloakaardi taotlemine - Isikut t\u00f5endava dokumendi taotlemine liikumisv\u00f5imetule inimesele - Politsei- ja Piirivalveamet", "description": "S\u00f5rmej\u00e4lgede andmine. Alates kuuendast eluaastast on elamisloakaardi taotlemiseks vaja anda s\u00f5rmej\u00e4ljed. Kui dokumenditaotleja ei saa teenindusse s\u00f5rmej\u00e4lgede andmiseks tulla, siis esita PPA-le sellekohane avaldus koos dokumenditaotlusega. S\u00f5rmej\u00e4lge ei pea andma, kui nende andmisest on m\u00f6\u00f6dunud v\u00e4hem kui kuus aastat. Taotlemine iseteeninduses. Ise...", "source_url": "https://www.politsei.ee/et/juhend/isikut-toendava-dokumendi-taotlemine-liikumisvoimetule-inimesele/liikumisvoimetule-inimesele-elamisloakaardi-taotlemine"}}, {"content": {"chunk": "- taotleja terviseseisundit t\u00f5endava dokumendi (arstit\u00f5endi)\n- taotleja kirjaliku n\u00f5usoleku, et sotsiaalt\u00f6\u00f6taja tema eest asju ajab\n- valla- v\u00f5i linnavalitsuse v\u00f5i hoolekandeasutuse juhi volikirja\n- linna- v\u00f5i vallavalitsuse v\u00f5i hoolekandeasutuse kinnituse, et taotleja terviseseisund ei v\u00f5imalda tal p\u00fcsivalt taotluse esitamiseks isiklikult p\u00f6\u00f6rduda PPA poole.\nK\u00f5ik dokumendi taotlemiseks vajalik saada e-postiga v\u00f5i postiga aadressile P\u00e4rnu mnt 139, 15060 Tallinn.", "imgurl": "", "title": "Liikumisv\u00f5imetule inimesele elamisloakaardi taotlemine - Isikut t\u00f5endava dokumendi taotlemine liikumisv\u00f5imetule inimesele - Politsei- ja Piirivalveamet", "description": "S\u00f5rmej\u00e4lgede andmine. Alates kuuendast eluaastast on elamisloakaardi taotlemiseks vaja anda s\u00f5rmej\u00e4ljed. Kui dokumenditaotleja ei saa teenindusse s\u00f5rmej\u00e4lgede andmiseks tulla, siis esita PPA-le sellekohane avaldus koos dokumenditaotlusega. S\u00f5rmej\u00e4lge ei pea andma, kui nende andmisest on m\u00f6\u00f6dunud v\u00e4hem kui kuus aastat. Taotlemine iseteeninduses. Ise...", "source_url": "https://www.politsei.ee/et/juhend/isikut-toendava-dokumendi-taotlemine-liikumisvoimetule-inimesele/liikumisvoimetule-inimesele-elamisloakaardi-taotlemine"}}]
\ No newline at end of file
diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Eestis_to\303\214\302\210o\303\214\302\210tamise_info_va\303\214\302\210lismaalasele_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Eestis_to\303\214\302\210o\303\214\302\210tamise_info_va\303\214\302\210lismaalasele_-_Politsei-_ja_Piirivalveamet.txt"
deleted file mode 100644
index b7011e58..00000000
--- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Eestis_to\303\214\302\210o\303\214\302\210tamise_info_va\303\214\302\210lismaalasele_-_Politsei-_ja_Piirivalveamet.txt"
+++ /dev/null
@@ -1,9 +0,0 @@
-# Elamisloa lõppemine või kehtetuks tunnistamine - Eestis töötamise info välismaalasele - Politsei- ja Piirivalveamet
-
-Elamisloa lõppemine või kehtetuks tunnistamine
-Mis saab siis, kui elamisluba lõppeb või seda ei pikendata?
-Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks. Sel ajal võid Eestis ka töötada.
-Mis saab siis, kui elamisluba tunnistatakse kehtetuks?
-Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.
-
-Migratsiooninõustaja on sulle igal sammul abiks. Küsi abi migratsiooninõustajalt. (https://www.politsei.ee/et/migratsiooninoustajad)
\ No newline at end of file
diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_alaealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_alaealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt"
deleted file mode 100644
index f6dce629..00000000
--- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_alaealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt"
+++ /dev/null
@@ -1,8 +0,0 @@
-# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba alaealisele lapsele pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet
-
-Elamisloa lõppemine või kehtetuks tunnistamine
-Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks.
-Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.
-Loe Eestis viibimise seaduslike aluste kohta.
-
-Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .
\ No newline at end of file
diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_a\303\214\302\210riu\303\214\302\210hingu_osanikule_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_a\303\214\302\210riu\303\214\302\210hingu_osanikule_-_Politsei-_ja_Piirivalveamet.txt"
deleted file mode 100644
index 6396bfe2..00000000
--- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_a\303\214\302\210riu\303\214\302\210hingu_osanikule_-_Politsei-_ja_Piirivalveamet.txt"
+++ /dev/null
@@ -1,9 +0,0 @@
-# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba ettevõtluseks äriühingu osanikule - Politsei- ja Piirivalveamet
-
-Elamisloa lõppemine või kehtetuks tunnistamine
-Mis saab siis, kui elamisluba lõppeb või seda ei pikendata?
-Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks. Sel ajal võid Eestis ka töötada.
-Mis saab siis, kui elamisluba tunnistatakse kehtetuks?
-Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.
-
-Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .
\ No newline at end of file
diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_fu\303\214\302\210u\303\214\302\210silisest_isikust_ettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_fu\303\214\302\210u\303\214\302\210silisest_isikust_ettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.txt"
deleted file mode 100644
index e966efff..00000000
--- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_fu\303\214\302\210u\303\214\302\210silisest_isikust_ettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.txt"
+++ /dev/null
@@ -1,9 +0,0 @@
-# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba ettevõtluseks füüsilisest isikust ettevõtjale - Politsei- ja Piirivalveamet
-
-Elamisloa lõppemine või kehtetuks tunnistamine
-Mis saab siis, kui elamisluba lõppeb või seda ei pikendata?
-Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks. Sel ajal võid Eestis ka töötada.
-Mis saab siis, kui elamisluba tunnistatakse kehtetuks?
-Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma.
-
-Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) .
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_iduettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_iduettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 9257c8a1..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_iduettevo\303\214\302\203tjale_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,9 +0,0 @@ -# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba ettevõtluseks iduettevõtjale - Politsei- ja Piirivalveamet - -Elamisloa lõppemine või kehtetuks tunnistamine -Mis saab siis, kui elamisluba lõppeb või seda ei pikendata? -Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks. Sel ajal võid Eestis ka töötada. -Mis saab siis, kui elamisluba tunnistatakse kehtetuks? -Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. - -Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) . \ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_suurinvestorile_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_suurinvestorile_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 09f8a009..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ettevo\303\214\302\203tluseks_suurinvestorile_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,9 +0,0 @@ -# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba ettevõtluseks suurinvestorile - Politsei- ja Piirivalveamet - -Elamisloa lõppemine või kehtetuks tunnistamine -Mis saab siis, kui elamisluba lõppeb või seda ei pikendata? -Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks. Sel ajal võid Eestis ka töötada. -Mis saab siis, kui elamisluba tunnistatakse kehtetuks? -Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. - -Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) . 
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_o\303\214\302\203ppimiseks_va\303\214\302\210lismaalasele_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_o\303\214\302\203ppimiseks_va\303\214\302\210lismaalasele_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 8c042b81..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_o\303\214\302\203ppimiseks_va\303\214\302\210lismaalasele_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,7 +0,0 @@ -# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba õppimiseks välismaalasele - Politsei- ja Piirivalveamet - -Elamisloa lõppemine või kehtetuks tunnistamine -Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 270 päevaks. Sel ajal võid Eestis ka töötada. -Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. -Lähemalt saad Eestis viibimise seaduslike aluste kohta lugeda siit. -Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) . \ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_pereliikmele_abikaasa_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_pereliikmele_abikaasa_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 40b1f4b7..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_pereliikmele_abikaasa_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,8 +0,0 @@ -# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba pereliikmele abikaasa juurde elama asumiseks - Politsei- ja Piirivalveamet - -Elamisloa lõppemine või kehtetuks tunnistamine -Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks. -Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. -Loe Eestis viibimise seaduslike aluste kohta. - -Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) . 
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 770c9824..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,7 +0,0 @@ -# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba täisealisele lapsele pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet - -Elamisloa lõppemine või kehtetuks tunnistamine -Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 270 päevaks. Sel ajal võid Eestis ka töötada. -Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. -Lähemalt saad Eestis viibimise seaduslike aluste kohta lugeda siit. -Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) . \ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 5fb58082..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,7 +0,0 @@ -# Elamisloa lõppemine või kehtetuks tunnistamine - Elamisluba vanemale või vanavanemale pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet - -Elamisloa lõppemine või kehtetuks tunnistamine -Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 270 päevaks. Sel ajal võid Eestis ka töötada. -Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. -Lähemalt saad Eestis viibimise seaduslike aluste kohta lugeda siit. -Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) . 
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Perekonnaliikme_ta\303\214\302\210htajaline_elamiso\303\214\302\203igus_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Perekonnaliikme_ta\303\214\302\210htajaline_elamiso\303\214\302\203igus_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 3cbda388..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Perekonnaliikme_ta\303\214\302\210htajaline_elamiso\303\214\302\203igus_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,8 +0,0 @@ -# Elamisloa lõppemine või kehtetuks tunnistamine - Perekonnaliikme tähtajaline elamisõigus - Politsei- ja Piirivalveamet - -Elamisloa lõppemine või kehtetuks tunnistamine -Mis saab siis, kui elamisõigus lõppeb või seda ei pikendata? -Kui sinu elamisõigus lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks. Sel ajal võid Eestis ka töötada. -Mis saab siis, kui elamisõigus tunnistatakse kehtetuks? -Kui elamisõigus tunnistatakse kehtetuks, pead kohe Eestist lahkuma. -Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt. (https://www.politsei.ee/et/migratsiooninoustajad) \ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Ta\303\214\302\210htajaline_elamisluba_pu\303\214\302\210sivalt_elamiseks_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Ta\303\214\302\210htajaline_elamisluba_pu\303\214\302\210sivalt_elamiseks_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index e5469fb0..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_lo\303\214\302\203ppemine_vo\303\214\302\203i_kehtetuks_tunnistamine_-_Ta\303\214\302\210htajaline_elamisluba_pu\303\214\302\210sivalt_elamiseks_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,8 +0,0 @@ -# Elamisloa lõppemine või kehtetuks tunnistamine - Tähtajaline elamisluba püsivalt elamiseks - Politsei- ja Piirivalveamet - -Elamisloa lõppemine või kehtetuks tunnistamine -Kui sinu elamisluba lõppeb või seda ei pikendata, võid Eestisse jääda veel kuni 90 päevaks. -Kui elamisluba tunnistatakse kehtetuks, pead kohe Eestist lahkuma. -Loe Eestis viibimise seaduslike aluste kohta. - -Migratsiooninõustaja on sulle igal sammul abiks. Küsi migratsiooninõustajalt (https://www.politsei.ee/et/migratsiooninoustajad) . 
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index bfcb3f8a..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_ta\303\214\302\210isealisele_lapsele_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,25 +0,0 @@ -# Elamisloa pikendamine - Elamisluba täisealisele lapsele pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet - -Elamisloa pikendamine -Kuidas pikendada elamisluba? - -- Taotle elamisloa pikendamist vähemalt 2 kuud enne elamisloa lõppemist. -- Esita nõutud dokumendid ja tasu riigilõiv. -- Õigeaegselt esitatud taotlusele saad vastuse hiljemalt 10 päeva enne kehtiva elamisloa lõppemist. -Kui andsid sõrmejäljed viimase 6 aasta jooksul ja sul on kehtiv elamisloakaart, saad elamisluba pikendada ka e-posti teel. -Kuidas taotleda elamisloa pikendamist Politsei- ja Piirivalveameti teeninduses? -Kui viibid Eestis, esita elamisloa taotlus Politsei- ja Piirivalveameti teeninduses. Selleks broneeri aeg veebis. (https://broneering.politsei.ee/) -Millised dokumendid pead esitama elamisloa pikendamiseks? -Kui taotled elamisloa pikendamist, esita: -- tähtajalise elamisloa pikendamise taotlus (https://www.politsei.ee/files/dokumendid/ankeedid/2023_03/tep-taotlus-est-08.02.2024.pdf?f4680da745) -- andmed lähedaste sugulaste ja perekonnaliikmete kohta ja taotlemisel nõutud dokumendid (https://www.politsei.ee/files/dokumendid/ankeedid/2023/pereliikmed-est-27.12.2023.pdf?d1f55577c5) vaid sel juhul, kui andmed on muutunud -- lapsevanema kutse (https://www.politsei.ee/files/elamine ja tootamine eestis/ankeedid/estlapsevanemakutse-est.pdf?b41e3ba86a) – täidab ja allkirjastab vanem kelle juurde elamisluba pikendatakse või kirjalik kinnitus, et andmed ei ole muutunud -- eluloolised andmed (https://www.politsei.ee/files/dokumendid/ankeedid/2023/eluloolised-andmed-est-2023-03-18.pdf?3d91c6b52d) -- lisaankeet (https://www.politsei.ee/files/dokumendid/ankeedid/2023/lisaankeet-1-est-2023-03-17.pdf?0f03b87480) -- isikut tõendav dokument -- digitaalne foto (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised) -- riigilõivu tasumist tõendav dokument (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad/elamisluba-ja-elamisoigus) , näiteks maksekorraldus. -Menetleja võib vajadusel paluda lisaandmeid või -dokumente. -Loe välisriigis välja antud dokumentide nõuete kohta. (http://vm.ee/et/avaliku-dokumendi-legaliseerimine) -Kui kiiresti otsustatakse elamisloa pikendamine? -Otsus elamisloa pikendamise või sellest keeldumise kohta tehakse hiljemalt 10 päeva enne sinu elamisloa kehtivusaja lõppemist, kui esitasid pikendamise taotluse 2 kuud enne elamisloa kehtivuse lõppemist. 
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 89dd26c6..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Elamisloa_pikendamine_-_Elamisluba_vanemale_vo\303\214\302\203i_vanavanemale_pereliikme_juurde_elama_asumiseks_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,25 +0,0 @@ -# Elamisloa pikendamine - Elamisluba vanemale või vanavanemale pereliikme juurde elama asumiseks - Politsei- ja Piirivalveamet - -Elamisloa pikendamine -Kuidas pikendada elamisluba? - -- Taotle elamisloa pikendamist vähemalt 2 kuud enne elamisloa lõppemist. -- Esita nõutud dokumendid ja tasu riigilõiv. -- Õigeaegselt esitatud taotlusele saad vastuse hiljemalt 10 päeva enne kehtiva elamisloa lõppemist. -Kui andsid sõrmejäljed viimase 6 aasta jooksul ja sul on kehtiv elamisloakaart, saad elamisluba pikendada ka e-posti teel. -Kuidas taotleda elamisloa pikendamist Politsei- ja Piirivalveameti teeninduses? -Kui viibid Eestis, esita elamisloa taotlus Politsei- ja Piirivalveameti teeninduses. Selleks broneeri aeg veebis. -Millised dokumendid pead esitama elamisloa pikendamiseks? -Kui taotled elamisloa pikendamist, esita: -- tähtajalise elamisloa pikendamise taotlus (https://www.politsei.ee/files/dokumendid/ankeedid/2023_03/tep-taotlus-est-08.02.2024.pdf?f4680da745) -- andmed lähedaste sugulaste ja perekonnaliikmete kohta ja taotlemisel nõutud dokumendid (https://www.politsei.ee/files/dokumendid/ankeedid/2023/pereliikmed-est-27.12.2023.pdf?d1f55577c5) vaid sel juhul, kui andmed on muutunud -- Lapse või lapselapse kutse (/files/dokumendid/ankeedid/2023_4/esttaisealiselapsevoilapselapsekutse-est.pdf?17ce73ac1c) , mille täidab ja allkirjastab laps või lapselaps või kirjalik kinnitus, et andmed ei ole muutunud -- eluloolised andmed (https://www.politsei.ee/files/dokumendid/ankeedid/2023/eluloolised-andmed-est-2023-03-18.pdf?3d91c6b52d) -- lisaankeet (https://www.politsei.ee/files/dokumendid/ankeedid/2023/lisaankeet-1-est-2023-03-17.pdf?0f03b87480) -- isikut tõendav dokument -- digitaalne foto (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised) -- riigilõivu tasumist tõendav dokument (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad/elamisluba-ja-elamisoigus) , näiteks maksekorraldus. -Menetleja võib vajadusel paluda lisaandmeid või -dokumente. -Loe välisriigis välja antud dokumentide nõuete kohta. (http://vm.ee/et/avaliku-dokumendi-legaliseerimine) -Kui kiiresti otsustatakse elamisloa pikendamine? -Otsus elamisloa pikendamise või sellest keeldumise kohta tehakse hiljemalt 10 päeva enne sinu elamisloa kehtivusaja lõppemist, kui esitasid pikendamise taotluse 2 kuud enne elamisloa kehtivuse lõppemist. 
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumis-_ja_allkirjavo\303\214\302\203imetule_inimese_dokumendi_ka\303\214\302\210ttesaamine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumis-_ja_allkirjavo\303\214\302\203imetule_inimese_dokumendi_ka\303\214\302\210ttesaamine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index e6c68ec2..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumis-_ja_allkirjavo\303\214\302\203imetule_inimese_dokumendi_ka\303\214\302\210ttesaamine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,4 +0,0 @@ -# Liikumis- ja allkirjavõimetule inimese dokumendi kättesaamine - Isikut tõendava dokumendi taotlemine liikumisvõimetule inimesele - Politsei- ja Piirivalveamet - -Liikumis- ja allkirjavõimetule inimese dokumendi kättesaamine -Kui dokument on valmis, siis võtame taotleja kontaktisikuga ühendust ja lepime kokku dokumendi kättesaamise aja ja koha. \ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_ID-kaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_ID-kaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 1d06570d..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_ID-kaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,39 +0,0 @@ -# Liikumisvõimetule inimesele ID-kaardi taotlemine - Isikut tõendava dokumendi taotlemine liikumisvõimetule inimesele - Politsei- ja Piirivalveamet - -Liikumisvõimetule inimesele ID-kaardi taotlemine -Sõrmejälgede andmine -Alates 12. eluaastast on ID-kaardi taotlemiseks vaja anda sõrmejäljed. Elamisloakaardi puhul peab sõrmejäljed andma alates 6. eluaastast. -Juhul, kui ID-kaardi või pikaajalise elaniku elamisloakaardi taotleja on vanem kui 70-aastane ning temalt on varasemalt võetud sõrmejäljed, saab uut dokumenti taotleda ilma uusi sõrmejälgi andmata. -Muudel juhtudel, kui liikumisvõimetu taotleja puhul on sõrmejäljed hõivatud vähem kui 11 aastat tagasi, saab ID-kaarti või elamisloakaarti taotleda ilma uusi sõrmejälgi andmata. Dokumendi taotlemiseks esita PPA-le koos dokumenditaotlusega taotleja terviseseisundit tõendava dokument (arstitõend). -Kui sõrmejälgede andmine on vajalik, kuid dokumenditaotleja ei saa teenindusse sõrmejälgede andmiseks tulla, siis esita PPA-le koos dokumenditaotluse ja taotleja terviseseisundit tõendava dokumendiga (arstitõend) avaldus sõrmejälgede hõivamiseks taotleja viibimiskohas. 
-Taotlemine iseteeninduse -Iseteeninduses saab taotleda: - -- korduvat ID-kaarti täiskasvanule -- ID-kaarti alaealisele lapsele -Iseteeninduses saab dokumenti taotleda ID-kaardi, mobiil-ID, Smart-ID abil. -Iseteeninduses pead üles laadima foto ning tasuma riigilõivu pangalingi kaudu. -Taotle ID-kaarti iseteeninduses. (https://etaotlus.politsei.ee/#/login) -Taotlemine posti teel -ID-kaardi taotlemiseks posti teel: -- Täida taotlusankeet (https://www.politsei.ee/files/Dokumentide taotlemise ankeedid/isikut-t-endavate-dokumentide-taotlusankeet-est-2019-01-14.pdf?c39fc91996) -- Tasu riigilõiv (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad) -- Tee digitaalne dokumendifoto (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised) -Postiga saada täidetud taotlusankeet ja riigilõivu tasumist tõendav dokument aadressil Pärnu mnt 139, 15060 Tallinn. -Digitaalne dokumendifoto saada koos infoga fotol olevast inimesest (nimi, perekonnanimi, isikukood või sünniaeg) aadressil . -Taotlusankeet -Posti teel ID-kaardi taotlemiseks tuleb liikumisvõimetul inimesel täita ja allkirjastada ankeet. -ID-kaardi taotlusankeet. (https://www.politsei.ee/files/Dokumentide taotlemise ankeedid/isikut-t-endavate-dokumentide-taotlusankeet-est-2019-01-14.pdf?c39fc91996) -Taotlusankeedi täitmise juhend. (https://www.politsei.ee/et/taotlusankeedi-taeitmise-juhend) -Riigilõivu tasumine -Tasu riigilõiv ning saada seda tõendav dokument koos taotlusankeediga. -Vaata riigilõivu määrasid. (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad) -Dokumendifoto -Digitaalne dokumendifoto saada koos infoga fotol olevast inimesest (nimi, perekonnanimi, isikukood või sünniaeg) aadressil . -Vaata dokumendifoto nõudeid ja soovitusi. (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised) -Kui ID-kaarti aitab taotleda sotsiaaltöötaja -Lisaks isikut tõendava dokumendi taotlusankeedile peab sotsiaaltöötaja esitama: -- taotleja kirjaliku nõusoleku, et sotsiaaltöötaja tema eest asju ajab -- valla- või linnavalitsuse või hoolekandeasutuse juhi volikirja -- linna- või vallavalitsuse või hoolekandeasutuse kinnituse, et taotleja terviseseisund ei võimalda tal püsivalt taotluse esitamiseks isiklikult pöörduda PPA poole. -Kõik dokumendi taotlemiseks vajalik saada e-postiga või postiga aadressile Pärnu mnt 139, 15060 Tallinn. 
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_PIN-koodide_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_PIN-koodide_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 37573809..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_PIN-koodide_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,11 +0,0 @@ -# Liikumisvõimetule inimesele PIN-koodide taotlemine - Isikut tõendava dokumendi taotlemine liikumisvõimetule inimesele - Politsei- ja Piirivalveamet - -Liikumisvõimetule inimesele PIN-koodide taotlemine -Kui liikumisvõimetu inimene unustanud dokumendi (ID-kaart, elamisloakaart, digi-ID) PIN-koodid ja turvakoodide ümbrik on kadunud, siis selleks, et jätkata e-teenuste kasutamist või anda digiallkirja, tuleb taotleda uus turvakoodide ümbrik. -Liikumisvõimetul inimesel aitab uusi PIN-koode kätte saada sotsiaaltöötaja. -Uute PIN-koodide saamiseks võtab sotsiaaltöötaja teenindusse kaasa - -- liikumisvõimetu inimese kehtiva ID-kaardi/elamisloakaardi -- taotleja kirjaliku nõusoleku, et sotsiaaltöötaja tema eest asju ajab -- valla- või linnavalitsuse või hoolekandeasutuse juhi volikirja -- linna- või vallavalitsuse või hoolekandeasutuse kinnituse, et inimese terviseseisund ei võimalda tal püsivalt taotluse esitamiseks isiklikult pöörduda PPA poole. \ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_elamisloakaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_elamisloakaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 6574bc47..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Liikumisvo\303\214\302\203imetule_inimesele_elamisloakaardi_taotlemine_-_Isikut_to\303\214\302\203endava_dokumendi_taotlemine_liikumisvo\303\214\302\203imetule_inimesele_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,40 +0,0 @@ -# Liikumisvõimetule inimesele elamisloakaardi taotlemine - Isikut tõendava dokumendi taotlemine liikumisvõimetule inimesele - Politsei- ja Piirivalveamet - -Liikumisvõimetule inimesele elamisloakaardi taotlemine -Sõrmejälgede andmine -Alates kuuendast eluaastast on elamisloakaardi taotlemiseks vaja anda sõrmejäljed. -Kui dokumenditaotleja ei saa teenindusse sõrmejälgede andmiseks tulla, siis esita PPA-le sellekohane avaldus koos dokumenditaotlusega. -Sõrmejälge ei pea andma, kui nende andmisest on möödunud vähem kui kuus aastat. 
-Taotlemine iseteeninduses -Iseteeninduses saab taotleda: - -- korduvat elamisloakaarti täiskasvanule -- elamisloakaarti alaealisele lapsele -Iseteeninduses saab dokumenti taotleda elamisloakaardi, mobiil-ID, Smart-ID või pangalingiga. -Iseteeninduses pead üles laadima foto ning tasuma riigilõivu pangalingi kaudu. Samuti saab taaskasutada sõrmejälgi, kui nende andmisest on möödunud vähem kui kuus aastat. -Taotle elamisloakaarti iseteeninduses. (https://etaotlus.politsei.ee/vid/login) -Taotlemine posti teel -Posti teel taotlemine on võimalik, kui sõrmejälgede andmisest on möödunud vähem kui kuus aastat. -Elamisloakaardi taotlemiseks posti teel: -- Täida taotlusankeet (https://www.politsei.ee/files/Dokumentide taotlemise ankeedid/isikut-t-endavate-dokumentide-taotlusankeet-est-2019-01-14.pdf?c39fc91996) -- Tasu riigilõiv (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad) -- Tee digitaalne dokumendifoto (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised) -Postiga saada täidetud taotlusankeet ja riigilõivu tasumist tõendav dokument aadressil Pärnu mnt 139, 15060 Tallinn. -Digitaalne dokumendifoto saada koos infoga fotol olevast inimesest (nimi, perekonnanimi, isikukood või sünniaeg) aadressil . -Taotlusankeet -Posti teel elamisloakaardi taotlemiseks tuleb liikumisvõimetul inimesel täita ja allkirjastada ankeet. -Elamisloakaardi taotlusankeet. (https://www.politsei.ee/files/Dokumentide taotlemise ankeedid/isikut-t-endavate-dokumentide-taotlusankeet-est-2019-01-14.pdf?c39fc91996) -Taotlusankeedi täitmise juhend. (https://www.politsei.ee/et/taotlusankeedi-taeitmise-juhend) -Riigilõivu tasumine -Tasu riigilõiv ning saada seda tõendav dokument koos taotlusankeediga. -Vaata riigilõivu määrasid. (https://www.politsei.ee/et/juhend/riigiloivude-maeaerad) -Dokumendifoto -Digitaalne dokumendifoto saada koos infoga fotol olevast inimesest (nimi, perekonnanimi, isikukood või sünniaeg) aadressil . -Vaata dokumendifoto nõudeid ja soovitusi. (https://www.politsei.ee/et/dokumendifoto-nouded-ja-juhised) -Elamisloakaarti aitab taotleda sotsiaaltöötaja -Lisaks isikut tõendava dokumendi taotlusankeedile peab sotsiaaltöötaja esitama: -- taotleja terviseseisundit tõendava dokumendi (arstitõendi) -- taotleja kirjaliku nõusoleku, et sotsiaaltöötaja tema eest asju ajab -- valla- või linnavalitsuse või hoolekandeasutuse juhi volikirja -- linna- või vallavalitsuse või hoolekandeasutuse kinnituse, et taotleja terviseseisund ei võimalda tal püsivalt taotluse esitamiseks isiklikult pöörduda PPA poole. -Kõik dokumendi taotlemiseks vajalik saada e-postiga või postiga aadressile Pärnu mnt 139, 15060 Tallinn. 
\ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Projekt_\303\242\302\200\302\236Alaealiste_erikohtlemise_su\303\214\302\210steemi_loomine\303\242\302\200\302\234_-_Ennetusprojektid_ja_kampaaniad_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Projekt_\303\242\302\200\302\236Alaealiste_erikohtlemise_su\303\214\302\210steemi_loomine\303\242\302\200\302\234_-_Ennetusprojektid_ja_kampaaniad_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index c10b13a9..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/Projekt_\303\242\302\200\302\236Alaealiste_erikohtlemise_su\303\214\302\210steemi_loomine\303\242\302\200\302\234_-_Ennetusprojektid_ja_kampaaniad_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,8 +0,0 @@ -# Projekt „Alaealiste erikohtlemise süsteemi loomine“ - Ennetusprojektid ja kampaaniad - Politsei- ja Piirivalveamet - -Projekt „Alaealiste erikohtlemise süsteemi loomine“ -Projekt toetab alaealiste õigusrikkujate erikohtlemise reformi: aitab tuua praktikasse uusi lähenemisviise, jätkusuutlikke lahendusi, mõjusaid sekkumisi, efektiivseid koostöövorme ja -mudeleid ning ajakohastada valdkonna spetsialistide teadmisi ja oskusi. -Politseilised tegevused on kokku lepitud Justiitsministeeriumi ja PPA vahel sõlmitud koostöökokkuleppes, mille eesmärgiks on politseiametnike ja teiste asjakohaste spetsialistide oskuste ja teadmiste parendamine töös laste ja noortega. Selleks on planeeritud erinevaid koolitusi, õppevisiite ning uute lühisekkumiste meetodite kasutuselevõtmine. -Projekti kestvus on kuni 30.04.2024 -Projekti rahastatakse Euroopa Majanduspiirkonna ja Norra finantsmehhanismi 2014-2021 programmi „Local Development and Poverty Reduction" -Lisainformatsioon: Kaisa Kägu 53493030 \ No newline at end of file diff --git "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/So\303\214\302\203jarelvade,_laskemoona_ja_lahingumoona_ka\303\214\302\210itlemise_tegevusluba_-_Majandustegevuse_load_-_Politsei-_ja_Piirivalveamet.txt" "b/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/So\303\214\302\203jarelvade,_laskemoona_ja_lahingumoona_ka\303\214\302\210itlemise_tegevusluba_-_Majandustegevuse_load_-_Politsei-_ja_Piirivalveamet.txt" deleted file mode 100644 index 6ecf32fc..00000000 --- "a/src/dataset-generation/data/output_Politsei-_ja_Piirivalveamet/So\303\214\302\203jarelvade,_laskemoona_ja_lahingumoona_ka\303\214\302\210itlemise_tegevusluba_-_Majandustegevuse_load_-_Politsei-_ja_Piirivalveamet.txt" +++ /dev/null @@ -1,15 +0,0 @@ -# Sõjarelvade, laskemoona ja lahingumoona käitlemise tegevusluba - Majandustegevuse load - Politsei- ja Piirivalveamet - -Sõjarelvade, laskemoona ja lahingumoona käitlemise tegevusluba -Sõjarelvadega seotud tegevusluba on vaja tegutsemiseks järgmistel tegevusaladel: - -- sõjarelva, selle olulise osa või lõhkeainet mittesisaldava lahingumoona valmistamine -- sõjarelva parandamine või ümbertegemine teenuse osutamisena -- sõjarelva, laskemoona või lahingumoona vedamine -- sõiduki, veesõiduki, õhusõiduki või muu toote valmistamine, millele paigaldatakse sõjarelv -Lisaks võib taotleda tegevusluba tegutsemiseks järgmistel tegevusaladel: - -- laskemoona või lahingumoona hoidmise teenuse osutamine -- laskemoona või lõhkeainet sisaldava lahingumoona valmistamine -Tegevusloa taotlemine (https://www.eesti.ee/et/erinouetega-tegevusalad/relvaohutus/sojarelvade-kaeitlemise-tegevusluba/) -Väljastatud tegevusload 
(https://mtr.mkm.ee/tegevusluba?m=97) \ No newline at end of file From cb3771ae9204da437086c26a2ac03b267ea78dd3 Mon Sep 17 00:00:00 2001 From: erangi-ar <111747955+erangi-ar@users.noreply.github.com> Date: Fri, 16 May 2025 18:57:14 +0530 Subject: [PATCH 002/195] migration scripts and endpoint changes --- .github/workflows/ci-cd-development.yml | 6 +-- ...ifier-script-v1-classification-results.sql | 13 ++++++ ...fier-script-v2-classification-feedback.sql | 12 +++++ DSL/Liquibase/liquibase.properties | 6 +++ DSL/Liquibase/master.yml | 5 ++ .../POST/get-inference-id.sql | 3 ++ .../POST/store-classification-feedback.sql | 5 +- .../global-classifier/POST/chats/feedback.yml | 38 +++++++++++---- docker-compose.yml | 46 ++++++++----------- migrate.sh | 15 ++++++ 10 files changed, 105 insertions(+), 44 deletions(-) create mode 100644 DSL/Liquibase/changelog/global-classifier-script-v1-classification-results.sql create mode 100644 DSL/Liquibase/changelog/global-classifier-script-v2-classification-feedback.sql create mode 100644 DSL/Liquibase/liquibase.properties create mode 100644 DSL/Liquibase/master.yml create mode 100644 DSL/Resql/global-classifier/POST/get-inference-id.sql create mode 100644 migrate.sh diff --git a/.github/workflows/ci-cd-development.yml b/.github/workflows/ci-cd-development.yml index e07c9aa1..d5b01599 100644 --- a/.github/workflows/ci-cd-development.yml +++ b/.github/workflows/ci-cd-development.yml @@ -7,7 +7,7 @@ on: jobs: deploy: - runs-on: self-hosted + runs-on: [self-hosted, development] steps: - name: Checkout code @@ -16,8 +16,6 @@ jobs: - name: Stop and remove existing containers run: | docker compose down - - name: Build & up Docker container run: | - docker compose up --build -d - + docker compose up --build -d \ No newline at end of file diff --git a/DSL/Liquibase/changelog/global-classifier-script-v1-classification-results.sql b/DSL/Liquibase/changelog/global-classifier-script-v1-classification-results.sql new file mode 100644 index 00000000..a80a32e6 --- /dev/null +++ b/DSL/Liquibase/changelog/global-classifier-script-v1-classification-results.sql @@ -0,0 +1,13 @@ +-- liquibase formatted sql + +-- changeset erangi:global-classifier-classification-results-table +CREATE TABLE public.classification_results ( id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY, chat_id VARCHAR(255) NOT NULL, inference_id VARCHAR(255) NOT NULL, target_agencies JSONB NOT NULL, classification_timestamp TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, CONSTRAINT classification_results_pkey PRIMARY KEY (id), CONSTRAINT unique_inference_id UNIQUE (inference_id) ); \ No newline at end of file diff --git a/DSL/Liquibase/changelog/global-classifier-script-v2-classification-feedback.sql b/DSL/Liquibase/changelog/global-classifier-script-v2-classification-feedback.sql new file mode 100644 index 00000000..7bf2e30 --- /dev/null +++ b/DSL/Liquibase/changelog/global-classifier-script-v2-classification-feedback.sql @@ -0,0 +1,12 @@ +-- liquibase formatted sql + +-- changeset erangi:global-classifier-classification-feedback-table +CREATE TABLE public.classification_feedback ( id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY, chat_id VARCHAR(255) NOT NULL, inference_id VARCHAR(255) NOT NULL, actual_agency_id VARCHAR(255) NOT NULL, feedback_timestamp TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, CONSTRAINT classification_feedback_pkey PRIMARY KEY (id), CONSTRAINT unique_feedback_per_inference UNIQUE
(inference_id) +); diff --git a/DSL/Liquibase/liquibase.properties b/DSL/Liquibase/liquibase.properties new file mode 100644 index 00000000..e20c6c98 --- /dev/null +++ b/DSL/Liquibase/liquibase.properties @@ -0,0 +1,6 @@ +changelogFile: /changelog/master.yml +url: jdbc:postgresql://localhost:5435/global-classifier +username: postgres +password: dbadmin +secureParsing: false +liquibase.hub.mode=off diff --git a/DSL/Liquibase/master.yml b/DSL/Liquibase/master.yml new file mode 100644 index 00000000..0861e812 --- /dev/null +++ b/DSL/Liquibase/master.yml @@ -0,0 +1,5 @@ +databaseChangeLog: + - include: + file: changelog/global-classifier-script-v1-classification-results.sql + - include: + file: changelog/global-classifier-script-v2-classification-feedback.sql \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/get-inference-id.sql b/DSL/Resql/global-classifier/POST/get-inference-id.sql new file mode 100644 index 00000000..c082bc79 --- /dev/null +++ b/DSL/Resql/global-classifier/POST/get-inference-id.sql @@ -0,0 +1,3 @@ +SELECT + inference_id FROM classification_results +WHERE chat_id = :chatId; \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/store-classification-feedback.sql b/DSL/Resql/global-classifier/POST/store-classification-feedback.sql index c551cf4a..da5e560c 100644 --- a/DSL/Resql/global-classifier/POST/store-classification-feedback.sql +++ b/DSL/Resql/global-classifier/POST/store-classification-feedback.sql @@ -1,12 +1,13 @@ INSERT INTO classification_feedback ( chat_id, inference_id, - actual_agency_id + actual_agency_id, + feedback_timestamp ) VALUES ( :chatId, :inferenceId, :actualAgencyId, - :feedback_timestamp::timestamp + :feedbackTimestamp::timestamp ) RETURNING id, chat_id, inference_id, feedback_timestamp; \ No newline at end of file diff --git a/DSL/Ruuter.public/global-classifier/POST/chats/feedback.yml b/DSL/Ruuter.public/global-classifier/POST/chats/feedback.yml index 093af207..8aff0ee0 100644 --- a/DSL/Ruuter.public/global-classifier/POST/chats/feedback.yml +++ b/DSL/Ruuter.public/global-classifier/POST/chats/feedback.yml @@ -11,9 +11,6 @@ declaration: - field: chatId type: string description: "ID of the chat that was classified" - - field: inferenceId - type: string - description: "Unique identifier of the classification prediction" - field: actualAgencyId type: string description: "The agency that actually handled the request" @@ -21,25 +18,46 @@ declaration: extractFeedbackData: assign: chatId: ${incoming.body.chatId} - inferenceId: ${incoming.body.inferenceId} actualAgencyId: ${incoming.body.actualAgencyId ?? ""} - timestamp: ${new Date().toISOString()} next: validate_feedback validate_feedback: switch: - - condition: ${!chatId || inferenceId === undefined} + - condition: ${!chatId} next: return_validation_error + next: get_inference_id + +get_inference_id: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-inference-id" + body: + chatId: ${chatId} + result: inference_result next: log_feedback + error: handle_inference_error + +handle_inference_error: + log: "Error retrieving inference_id: ${inference_result.error}" + next: return_inference_error + return_validation_error: status: 400 - return: "Invalid feedback payload. chatId, inferenceId are required fields." + return: "Invalid feedback payload. chatId is required." 
next: end log_feedback: - log: "Classification feedback received for chat ${chatId}" + log: "Classification feedback received for chat ${chatId} with inference ID ${inference_result.response.body[0].inferenceId}" + assign: + inferenceId: ${inference_result.response.body[0].inferenceId} next: store_feedback + error: return_inference_error + +return_inference_error: + status: 404 + return: "Could not find classification result for the provided chat ID." + next: end store_feedback: call: http.post args: url: "[#GLOBAL_CLASSIFIER_RESQL]/store-classification-feedback" body: chatId: ${chatId} inferenceId: ${inferenceId} actualAgencyId: ${actualAgencyId} + feedbackTimestamp: ${new Date().toISOString()} result: store_result error: handle_storage_error next: check_storage_result @@ -75,5 +94,4 @@ return_storage_error: return_success: status: 200 return: "Feedback received and processed successfully" - next: end - + next: end \ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml index 2e7412c0..e982a10c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,8 +17,7 @@ services: ports: - 8086:8086 networks: - bykstack: - ipv4_address: 172.25.0.2 + - bykstack-gc cpus: "0.5" mem_limit: "512M" @@ -40,8 +39,7 @@ services: ports: - 8088:8088 networks: - bykstack: - ipv4_address: 172.25.0.3 + - bykstack-gc cpus: "0.5" mem_limit: "512M" @@ -58,8 +56,7 @@ services: # ports: # - 3000:3000 # networks: # - bykstack: # - ipv4_address: 172.25.0.4 + # - bykstack-gc tim: container_name: tim @@ -72,8 +69,7 @@ services: ports: - 8085:8085 networks: - bykstack: - ipv4_address: 172.25.0.5 + - bykstack-gc extra_hosts: - "host.docker.internal:host-gateway" cpus: "0.5" @@ -92,8 +88,7 @@ services: ports: - 9876:5432 networks: - bykstack: - ipv4_address: 172.25.0.6 + - bykstack-gc # authentication-layer: # container_name: authentication-layer @@ -101,8 +96,7 @@ # ports: # - 3004:3004 # networks: # - bykstack: # - ipv4_address: 172.25.0.8 + # - bykstack-gc resql: container_name: resql @@ -112,7 +106,7 @@ environment: - sqlms.datasources.[0].name=byk - sqlms.datasources.[0].jdbcUrl=jdbc:postgresql://users_db:5432/global-classifier #For LocalDb Use - # sqlms.datasources.[0].jdbcUrl=jdbc:postgresql://171.22.247.13:5433/byk?sslmode=require + # sqlms.datasources.[0].jdbcUrl=jdbc:postgresql://171.22.247.13:5435/byk?sslmode=require - sqlms.datasources.[0].username=postgres - sqlms.datasources.[0].password=dbadmin - logging.level.org.springframework.boot=INFO @@ -121,8 +115,7 @@ volumes: - ./DSL/Resql:/DSL networks: - bykstack: - ipv4_address: 172.25.0.9 + - bykstack-gc users_db: container_name: users_db @@ -132,13 +125,12 @@ - POSTGRES_PASSWORD=dbadmin - POSTGRES_DB=global-classifier ports: - - 5433:5432 + - 5435:5432 volumes: - ~/buerokratt_classifier/db_files:/var/lib/postgresql/data networks: - bykstack: - ipv4_address: 172.25.0.10 + - bykstack-gc restart: always init: @@ -148,8 +140,7 @@ - shared-volume:/shared - ./model_trainer:/app/model_trainer networks: - bykstack: - ipv4_address: 172.25.0.12 + - bykstack-gc classifier-service: container_name: classifier-service @@ -158,8 +149,7 @@ ports: - "8090:8090" networks: - bykstack: - ipv4_address: 172.25.0.27 + - bykstack-gc volumes: - ./src/classifier-service:/app environment: @@ -171,10 +161,10 @@ volumes: opensearch-data: networks: - bykstack: - name: bykstack + bykstack-gc: + name: bykstack-gc driver: bridge - ipam: - config: - - subnet: 172.25.0.0/27 - gateway: 172.25.0.1 \ No newline at end of file + # ipam: + # config: + # - subnet: 172.25.0.0/27 + # - gateway: 172.25.0.1 \ No newline at end of file diff --git a/migrate.sh b/migrate.sh new file mode 100644 index 00000000..1e1b715e --- /dev/null +++ b/migrate.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# Function to parse the ini file and extract the value for a given key +get_ini_value() { + local file=$1 + local key=$2 + awk -F '=' -v key="$key" '$1 == key { gsub(/^[ \t]+|[ \t]+$/, "", $2); print $2; exit }' "$file" +} + +# Get the values from constants.ini +INI_FILE="constants.ini" +DB_PASSWORD=$(get_ini_value "$INI_FILE" "DB_PASSWORD") + + +docker run --rm --network bykstack-gc -v `pwd`/DSL/Liquibase/changelog:/liquibase/changelog -v `pwd`/DSL/Liquibase/master.yml:/liquibase/master.yml -v `pwd`/DSL/Liquibase/data:/liquibase/data liquibase/liquibase --defaultsFile=/liquibase/changelog/liquibase.properties --changelog-file=master.yml --url=jdbc:postgresql://users_db:5432/global-classifier?user=postgres --password="$DB_PASSWORD" update \ No newline at end of file
From 769f53dc960eb36935ef499099d7d4e81a7c887b Mon Sep 17 00:00:00 2001 From: erangi-ar <111747955+erangi-ar@users.noreply.github.com> Date: Wed, 21 May 2025 15:13:31 +0530 Subject: [PATCH 003/195] user management module with existing components & eslint integration to ci cd --- .github/workflows/ci-cd-development.yml | 31 +- .github/workflows/workflow.yml | 0 .../classifier-script-v5-configuration.sql | 15 + ...l-classifier-script-v3-user-management.sql | 41 + ...al-classifier-script-v4-authority-data.xml | 17 + DSL/Liquibase/data/authority.csv | 3 + DSL/Liquibase/master.yml | 9 +- .../global-classifier/POST/delete-user.sql | 36 + .../POST/get-configuration.sql | 5 + .../global-classifier/POST/get-user-role.sql | 10 + .../POST/get-user-with-roles.sql | 15 + DSL/Resql/global-classifier/POST/get-user.sql | 5 + .../POST/get-users-with-roles-by-role.sql | 41 + .../POST/insert-user-role.sql | 2 + .../global-classifier/POST/insert-user.sql | 2 + .../global-classifier/POST/update-user.sql | 16 + .../global-classifier/GET/.guard | 28 + .../global-classifier/GET/accounts/logout.yml | 63 + .../GET/accounts/user-role.yml | 53 + .../global-classifier/GET/auth/jwt/extend.yml | 41 + .../GET/auth/jwt/userinfo.yml | 27 + .../global-classifier/POST/.guard | 28 + .../global-classifier/POST/accounts/.guard | 28 + .../global-classifier/POST/accounts/add.yml | 89 + .../POST/accounts/delete.yml | 29 + .../global-classifier/POST/accounts/edit.yml | 94 + .../POST/accounts/exists.yml | 40 + .../global-classifier/POST/accounts/users.yml | 39 + .../TEMPLATES/check-user-authority-admin.yml | 52 + .../TEMPLATES/check-user-authority.yml | 52 + .../global-classifier/POST/auth/login.yml | 79 + GUI/.dockerignore | 7 + GUI/.env.development | 8 + GUI/.eslintrc.json | 3 + GUI/.gitignore | 30 + GUI/.prettierignore | 1 + GUI/.prettierrc | 6 + GUI/Dockerfile.dev | 14 + GUI/docker-compose.yml | 10 + GUI/entrypoint.sh | 7 + GUI/i18n.ts | 26 + GUI/index.html | 14 + GUI/package-lock.json | 15860 ++++++++++++++++ GUI/package.json | 117 + GUI/public/favicon.ico | Bin 0 -> 15406 bytes GUI/public/mockServiceWorker.js | 303 + GUI/rebuild.sh | 12 + GUI/src/App.tsx | 67 + GUI/src/assets/BackArrowButton.tsx | 31 + GUI/src/assets/DataModelsIcon.tsx | 20 + GUI/src/assets/DatabaseIcon.tsx | 37 + GUI/src/assets/Dataset.tsx | 18 + GUI/src/assets/IncomingTextsIcon.tsx | 20 + GUI/src/assets/IntegrationIcon.tsx | 42 + GUI/src/assets/Jira.tsx | 55 + GUI/src/assets/Outlook.tsx | 25 + GUI/src/assets/TestModelIcon.tsx | 32 + GUI/src/assets/UserIcon.tsx | 30 + GUI/src/assets/logo-white.svg | 29 + GUI/src/assets/logo.svg | 31 + GUI/src/assets/newMessageSound.mp3 | Bin 0 -> 20942 bytes GUI/src/components/Box/Box.scss | 56 + GUI/src/components/Box/index.tsx | 16 + GUI/src/components/Button/Button.scss | 151 + GUI/src/components/Button/index.tsx | 56 + GUI/src/components/Card/Card.scss | 65 + GUI/src/components/Card/index.tsx | 39 + .../components/Collapsible/Collapsible.scss | 35 + GUI/src/components/Collapsible/index.tsx | 31 + GUI/src/components/DataTable/CloseIcon.tsx | 22 + GUI/src/components/DataTable/DataTable.scss | 197 + .../components/DataTable/DeboucedInput.scss | 11 + .../components/DataTable/DebouncedInput.tsx | 54 + GUI/src/components/DataTable/Filter.tsx | 65 + GUI/src/components/DataTable/index.tsx | 242 + GUI/src/components/Dialog/Dialog.scss | 63 + GUI/src/components/Dialog/index.tsx | 45 + GUI/src/components/Drawer/Drawer.scss | 40 + GUI/src/components/Drawer/index.tsx | 42 + GUI/src/components/FileUpload/index.tsx | 98 + .../FormElements/DynamicForm/index.tsx | 86 + .../FormCheckbox/FormCheckbox.scss | 57 + .../FormElements/FormCheckbox/index.tsx | 39 + .../FormCheckboxes/FormCheckboxes.scss | 68 + .../FormElements/FormCheckboxes/index.tsx | 77 + .../FormDatepicker/FormDatepicker.scss | 154 + .../FormElements/FormDatepicker/index.tsx | 98 + .../FormElements/FormInput/FormInput.scss | 97 + .../FormElements/FormInput/index.tsx | 50 + .../FormElements/FormRadios/FormRadios.scss | 76 + .../FormElements/FormRadios/index.tsx | 65 + .../FormSelect/FormMultiselect.tsx | 124 + .../FormElements/FormSelect/FormSelect.scss | 128 + .../FormElements/FormSelect/index.tsx | 148 + .../FormTextarea/FormTextarea.scss | 109 + .../FormElements/FormTextarea/index.tsx | 72 + .../FormElements/Switch/Switch.scss | 68 + .../components/FormElements/Switch/index.tsx | 68 + .../FormElements/SwitchBox/SwitchBox.scss | 45 + .../FormElements/SwitchBox/index.tsx | 44 + GUI/src/components/FormElements/index.tsx | 23 + GUI/src/components/Header/Header.scss | 10 + GUI/src/components/Header/index.tsx | 196 + GUI/src/components/Icon/Icon.scss | 17 + GUI/src/components/Icon/index.tsx | 26 + GUI/src/components/Label/Label.scss | 91 + GUI/src/components/Label/index.tsx | 40 + GUI/src/components/LabelChip/index.scss | 23 + GUI/src/components/LabelChip/index.tsx | 25 + GUI/src/components/Layout/Layout.scss | 28 + GUI/src/components/Layout/index.tsx | 23 + .../MainNavigation/MainNavigation.scss | 130 + GUI/src/components/MainNavigation/index.tsx | 140 + GUI/src/components/Popover/Popover.scss | 15 + GUI/src/components/Popover/index.tsx | 27 + GUI/src/components/ProgressBar/index.scss | 28 + GUI/src/components/ProgressBar/index.tsx | 26 + GUI/src/components/Section/Section.scss | 11 + GUI/src/components/Section/index.tsx | 13 + GUI/src/components/Toast/Toast.scss | 73 + GUI/src/components/Toast/index.tsx | 54 + GUI/src/components/Tooltip/Tooltip.scss | 16 + GUI/src/components/Tooltip/index.tsx | 28 + GUI/src/components/Track/index.tsx | 57 + GUI/src/components/index.tsx | 55 + .../CircularSpinner/CircularSpinner.tsx | 19 + .../molecules/CircularSpinner/Spinner.scss | 23 + .../DatasetGroupCard/DatasetGroupCard.scss | 91 + .../molecules/DatasetGroupCard/index.tsx | 144 + .../molecules/NoDataView/NoDataView.scss | 7 + .../components/molecules/NoDataView/index.tsx | 24 + .../molecules/Pagination/Pagination.scss | 194 + .../components/molecules/Pagination/index.tsx | 66 + .../TableSkeleton/SkeletonTable.scss | 31 + .../molecules/TableSkeleton/TableSkeleton.tsx | 24 +
.../UserManagementActionButtons.tsx | 91 + GUI/src/config/rolesConfig.json | 4 + GUI/src/constants/config.ts | 5 + GUI/src/constants/menuIcons.tsx | 24 + GUI/src/context/DialogContext.tsx | 83 + GUI/src/context/ToastContext.tsx | 58 + GUI/src/enums/commonEnums.ts | 18 + GUI/src/enums/correctedTextsEnums.ts | 5 + GUI/src/enums/dataModelsEnums.ts | 31 + GUI/src/enums/datasetEnums.ts | 56 + GUI/src/enums/roles.ts | 4 + GUI/src/hoc/with-authorization.tsx | 29 + GUI/src/hooks/useDialog.tsx | 4 + GUI/src/hooks/useDocumentEscapeListener.tsx | 17 + GUI/src/hooks/useOptionLists.tsx | 26 + GUI/src/hooks/useToast.tsx | 5 + GUI/src/main.tsx | 51 + GUI/src/model/ruuter-response-model.ts | 11 + .../pages/LoadingScreen/LoadingScreen.scss | 20 + GUI/src/pages/LoadingScreen/LoadingScreen.tsx | 12 + GUI/src/pages/Unauthorized/unauthorized.scss | 30 + GUI/src/pages/Unauthorized/unauthorized.tsx | 17 + .../pages/UserManagement/SettingsUsers.scss | 48 + .../pages/UserManagement/UserManagement.scss | 28 + GUI/src/pages/UserManagement/UserModal.tsx | 298 + GUI/src/pages/UserManagement/index.tsx | 191 + GUI/src/services/api-dev.ts | 39 + GUI/src/services/api-external.ts | 36 + GUI/src/services/api.ts | 36 + GUI/src/services/users.ts | 48 + GUI/src/static/icons/link-external-blue.svg | 8 + GUI/src/static/icons/link-external-white.svg | 1 + GUI/src/store/index.ts | 16 + GUI/src/styles/components/_vertical-tabs.scss | 119 + GUI/src/styles/generic/_base.scss | 169 + GUI/src/styles/generic/_fonts.scss | 15 + GUI/src/styles/generic/_reset.scss | 145 + GUI/src/styles/main.scss | 21 + GUI/src/styles/settings/_mixins.scss | 23 + GUI/src/styles/settings/_utility-classes.scss | 3 + .../settings/variables/_breakpoints.scss | 9 + .../styles/settings/variables/_colors.scss | 155 + GUI/src/styles/settings/variables/_grid.scss | 3 + GUI/src/styles/settings/variables/_other.scss | 16 + .../styles/settings/variables/_spacing.scss | 21 + .../settings/variables/_typography.scss | 22 + GUI/src/styles/tools/_color.scss | 4 + GUI/src/styles/tools/_spacing.scss | 4 + GUI/src/types/authorities.ts | 8 + GUI/src/types/chat.ts | 108 + GUI/src/types/common.ts | 6 + GUI/src/types/correctedTextTypes.ts | 12 + GUI/src/types/dataModels.ts | 98 + GUI/src/types/datasetGroups.ts | 168 + GUI/src/types/emergencyNotice.ts | 6 + GUI/src/types/establishment.ts | 4 + GUI/src/types/integration.ts | 4 + GUI/src/types/mainNavigation.ts | 14 + GUI/src/types/message.ts | 64 + GUI/src/types/organizationWorkingTime.ts | 22 + GUI/src/types/router.ts | 4 + GUI/src/types/service.ts | 6 + GUI/src/types/session.ts | 7 + GUI/src/types/testModel.ts | 6 + GUI/src/types/testModelTypes.ts | 22 + GUI/src/types/user.ts | 17 + GUI/src/types/userInfo.ts | 16 + GUI/src/types/userProfileSettings.ts | 10 + GUI/src/types/widgetConfig.ts | 8 + GUI/src/utils/commonUtilts.ts | 76 + GUI/src/utils/constants.ts | 10 + GUI/src/utils/dataTableUtils.ts | 44 + GUI/src/utils/endpoints.ts | 73 + GUI/src/utils/format-bytes.ts | 8 + GUI/src/utils/generateUEID.ts | 8 + GUI/src/utils/local-storage-utils.ts | 17 + GUI/src/utils/queryKeys.ts | 103 + GUI/src/vite-env.d.ts | 2 + GUI/translations/en/common.json | 483 + GUI/translations/et/common.json | 481 + GUI/tsconfig.json | 36 + GUI/tsconfig.node.json | 9 + GUI/vite.config.ts | 43 + GUI/vitePlugin.js | 25 + Global-Classifier/GUI/.eslintignore | 7 + Global-Classifier/GUI/.eslintrc.js | 28 + .../GUI/package.json (to be modified) | 16 + .../GUI/src/components/Header/Header.test.tsx | 8 + docker-compose.yml | 67 +- migrate.sh | 2 +- 225 files 
changed, 26857 insertions(+), 22 deletions(-) delete mode 100644 .github/workflows/workflow.yml create mode 100644 DSL/Liquibase/changelog/classifier-script-v5-configuration.sql create mode 100644 DSL/Liquibase/changelog/global-classifier-script-v3-user-management.sql create mode 100644 DSL/Liquibase/changelog/global-classifier-script-v4-authority-data.xml create mode 100644 DSL/Liquibase/data/authority.csv create mode 100644 DSL/Resql/global-classifier/POST/delete-user.sql create mode 100644 DSL/Resql/global-classifier/POST/get-configuration.sql create mode 100644 DSL/Resql/global-classifier/POST/get-user-role.sql create mode 100644 DSL/Resql/global-classifier/POST/get-user-with-roles.sql create mode 100644 DSL/Resql/global-classifier/POST/get-user.sql create mode 100644 DSL/Resql/global-classifier/POST/get-users-with-roles-by-role.sql create mode 100644 DSL/Resql/global-classifier/POST/insert-user-role.sql create mode 100644 DSL/Resql/global-classifier/POST/insert-user.sql create mode 100644 DSL/Resql/global-classifier/POST/update-user.sql create mode 100644 DSL/Ruuter.private/global-classifier/GET/.guard create mode 100644 DSL/Ruuter.private/global-classifier/GET/accounts/logout.yml create mode 100644 DSL/Ruuter.private/global-classifier/GET/accounts/user-role.yml create mode 100644 DSL/Ruuter.private/global-classifier/GET/auth/jwt/extend.yml create mode 100644 DSL/Ruuter.private/global-classifier/GET/auth/jwt/userinfo.yml create mode 100644 DSL/Ruuter.private/global-classifier/POST/.guard create mode 100644 DSL/Ruuter.private/global-classifier/POST/accounts/.guard create mode 100644 DSL/Ruuter.private/global-classifier/POST/accounts/add.yml create mode 100644 DSL/Ruuter.private/global-classifier/POST/accounts/delete.yml create mode 100644 DSL/Ruuter.private/global-classifier/POST/accounts/edit.yml create mode 100644 DSL/Ruuter.private/global-classifier/POST/accounts/exists.yml create mode 100644 DSL/Ruuter.private/global-classifier/POST/accounts/users.yml create mode 100644 DSL/Ruuter.private/global-classifier/TEMPLATES/check-user-authority-admin.yml create mode 100644 DSL/Ruuter.private/global-classifier/TEMPLATES/check-user-authority.yml create mode 100644 DSL/Ruuter.public/global-classifier/POST/auth/login.yml create mode 100644 GUI/.dockerignore create mode 100644 GUI/.env.development create mode 100644 GUI/.eslintrc.json create mode 100644 GUI/.gitignore create mode 100644 GUI/.prettierignore create mode 100644 GUI/.prettierrc create mode 100644 GUI/Dockerfile.dev create mode 100644 GUI/docker-compose.yml create mode 100644 GUI/entrypoint.sh create mode 100644 GUI/i18n.ts create mode 100644 GUI/index.html create mode 100644 GUI/package-lock.json create mode 100644 GUI/package.json create mode 100644 GUI/public/favicon.ico create mode 100644 GUI/public/mockServiceWorker.js create mode 100644 GUI/rebuild.sh create mode 100644 GUI/src/App.tsx create mode 100644 GUI/src/assets/BackArrowButton.tsx create mode 100644 GUI/src/assets/DataModelsIcon.tsx create mode 100644 GUI/src/assets/DatabaseIcon.tsx create mode 100644 GUI/src/assets/Dataset.tsx create mode 100644 GUI/src/assets/IncomingTextsIcon.tsx create mode 100644 GUI/src/assets/IntegrationIcon.tsx create mode 100644 GUI/src/assets/Jira.tsx create mode 100644 GUI/src/assets/Outlook.tsx create mode 100644 GUI/src/assets/TestModelIcon.tsx create mode 100644 GUI/src/assets/UserIcon.tsx create mode 100644 GUI/src/assets/logo-white.svg create mode 100644 GUI/src/assets/logo.svg create mode 100644 GUI/src/assets/newMessageSound.mp3 
create mode 100644 GUI/src/components/Box/Box.scss create mode 100644 GUI/src/components/Box/index.tsx create mode 100644 GUI/src/components/Button/Button.scss create mode 100644 GUI/src/components/Button/index.tsx create mode 100644 GUI/src/components/Card/Card.scss create mode 100644 GUI/src/components/Card/index.tsx create mode 100644 GUI/src/components/Collapsible/Collapsible.scss create mode 100644 GUI/src/components/Collapsible/index.tsx create mode 100644 GUI/src/components/DataTable/CloseIcon.tsx create mode 100644 GUI/src/components/DataTable/DataTable.scss create mode 100644 GUI/src/components/DataTable/DeboucedInput.scss create mode 100644 GUI/src/components/DataTable/DebouncedInput.tsx create mode 100644 GUI/src/components/DataTable/Filter.tsx create mode 100644 GUI/src/components/DataTable/index.tsx create mode 100644 GUI/src/components/Dialog/Dialog.scss create mode 100644 GUI/src/components/Dialog/index.tsx create mode 100644 GUI/src/components/Drawer/Drawer.scss create mode 100644 GUI/src/components/Drawer/index.tsx create mode 100644 GUI/src/components/FileUpload/index.tsx create mode 100644 GUI/src/components/FormElements/DynamicForm/index.tsx create mode 100644 GUI/src/components/FormElements/FormCheckbox/FormCheckbox.scss create mode 100644 GUI/src/components/FormElements/FormCheckbox/index.tsx create mode 100644 GUI/src/components/FormElements/FormCheckboxes/FormCheckboxes.scss create mode 100644 GUI/src/components/FormElements/FormCheckboxes/index.tsx create mode 100644 GUI/src/components/FormElements/FormDatepicker/FormDatepicker.scss create mode 100644 GUI/src/components/FormElements/FormDatepicker/index.tsx create mode 100644 GUI/src/components/FormElements/FormInput/FormInput.scss create mode 100644 GUI/src/components/FormElements/FormInput/index.tsx create mode 100644 GUI/src/components/FormElements/FormRadios/FormRadios.scss create mode 100644 GUI/src/components/FormElements/FormRadios/index.tsx create mode 100644 GUI/src/components/FormElements/FormSelect/FormMultiselect.tsx create mode 100644 GUI/src/components/FormElements/FormSelect/FormSelect.scss create mode 100644 GUI/src/components/FormElements/FormSelect/index.tsx create mode 100644 GUI/src/components/FormElements/FormTextarea/FormTextarea.scss create mode 100644 GUI/src/components/FormElements/FormTextarea/index.tsx create mode 100644 GUI/src/components/FormElements/Switch/Switch.scss create mode 100644 GUI/src/components/FormElements/Switch/index.tsx create mode 100644 GUI/src/components/FormElements/SwitchBox/SwitchBox.scss create mode 100644 GUI/src/components/FormElements/SwitchBox/index.tsx create mode 100644 GUI/src/components/FormElements/index.tsx create mode 100644 GUI/src/components/Header/Header.scss create mode 100644 GUI/src/components/Header/index.tsx create mode 100644 GUI/src/components/Icon/Icon.scss create mode 100644 GUI/src/components/Icon/index.tsx create mode 100644 GUI/src/components/Label/Label.scss create mode 100644 GUI/src/components/Label/index.tsx create mode 100644 GUI/src/components/LabelChip/index.scss create mode 100644 GUI/src/components/LabelChip/index.tsx create mode 100644 GUI/src/components/Layout/Layout.scss create mode 100644 GUI/src/components/Layout/index.tsx create mode 100644 GUI/src/components/MainNavigation/MainNavigation.scss create mode 100644 GUI/src/components/MainNavigation/index.tsx create mode 100644 GUI/src/components/Popover/Popover.scss create mode 100644 GUI/src/components/Popover/index.tsx create mode 100644 
GUI/src/components/ProgressBar/index.scss create mode 100644 GUI/src/components/ProgressBar/index.tsx create mode 100644 GUI/src/components/Section/Section.scss create mode 100644 GUI/src/components/Section/index.tsx create mode 100644 GUI/src/components/Toast/Toast.scss create mode 100644 GUI/src/components/Toast/index.tsx create mode 100644 GUI/src/components/Tooltip/Tooltip.scss create mode 100644 GUI/src/components/Tooltip/index.tsx create mode 100644 GUI/src/components/Track/index.tsx create mode 100644 GUI/src/components/index.tsx create mode 100644 GUI/src/components/molecules/CircularSpinner/CircularSpinner.tsx create mode 100644 GUI/src/components/molecules/CircularSpinner/Spinner.scss create mode 100644 GUI/src/components/molecules/DatasetGroupCard/DatasetGroupCard.scss create mode 100644 GUI/src/components/molecules/DatasetGroupCard/index.tsx create mode 100644 GUI/src/components/molecules/NoDataView/NoDataView.scss create mode 100644 GUI/src/components/molecules/NoDataView/index.tsx create mode 100644 GUI/src/components/molecules/Pagination/Pagination.scss create mode 100644 GUI/src/components/molecules/Pagination/index.tsx create mode 100644 GUI/src/components/molecules/TableSkeleton/SkeletonTable.scss create mode 100644 GUI/src/components/molecules/TableSkeleton/TableSkeleton.tsx create mode 100644 GUI/src/components/molecules/UserManagementActionButtons/UserManagementActionButtons.tsx create mode 100644 GUI/src/config/rolesConfig.json create mode 100644 GUI/src/constants/config.ts create mode 100644 GUI/src/constants/menuIcons.tsx create mode 100644 GUI/src/context/DialogContext.tsx create mode 100644 GUI/src/context/ToastContext.tsx create mode 100644 GUI/src/enums/commonEnums.ts create mode 100644 GUI/src/enums/correctedTextsEnums.ts create mode 100644 GUI/src/enums/dataModelsEnums.ts create mode 100644 GUI/src/enums/datasetEnums.ts create mode 100644 GUI/src/enums/roles.ts create mode 100644 GUI/src/hoc/with-authorization.tsx create mode 100644 GUI/src/hooks/useDialog.tsx create mode 100644 GUI/src/hooks/useDocumentEscapeListener.tsx create mode 100644 GUI/src/hooks/useOptionLists.tsx create mode 100644 GUI/src/hooks/useToast.tsx create mode 100644 GUI/src/main.tsx create mode 100644 GUI/src/model/ruuter-response-model.ts create mode 100644 GUI/src/pages/LoadingScreen/LoadingScreen.scss create mode 100644 GUI/src/pages/LoadingScreen/LoadingScreen.tsx create mode 100644 GUI/src/pages/Unauthorized/unauthorized.scss create mode 100644 GUI/src/pages/Unauthorized/unauthorized.tsx create mode 100644 GUI/src/pages/UserManagement/SettingsUsers.scss create mode 100644 GUI/src/pages/UserManagement/UserManagement.scss create mode 100644 GUI/src/pages/UserManagement/UserModal.tsx create mode 100644 GUI/src/pages/UserManagement/index.tsx create mode 100644 GUI/src/services/api-dev.ts create mode 100644 GUI/src/services/api-external.ts create mode 100644 GUI/src/services/api.ts create mode 100644 GUI/src/services/users.ts create mode 100644 GUI/src/static/icons/link-external-blue.svg create mode 100644 GUI/src/static/icons/link-external-white.svg create mode 100644 GUI/src/store/index.ts create mode 100644 GUI/src/styles/components/_vertical-tabs.scss create mode 100644 GUI/src/styles/generic/_base.scss create mode 100644 GUI/src/styles/generic/_fonts.scss create mode 100644 GUI/src/styles/generic/_reset.scss create mode 100644 GUI/src/styles/main.scss create mode 100644 GUI/src/styles/settings/_mixins.scss create mode 100644 GUI/src/styles/settings/_utility-classes.scss create mode 
100644 GUI/src/styles/settings/variables/_breakpoints.scss create mode 100644 GUI/src/styles/settings/variables/_colors.scss create mode 100644 GUI/src/styles/settings/variables/_grid.scss create mode 100644 GUI/src/styles/settings/variables/_other.scss create mode 100644 GUI/src/styles/settings/variables/_spacing.scss create mode 100644 GUI/src/styles/settings/variables/_typography.scss create mode 100644 GUI/src/styles/tools/_color.scss create mode 100644 GUI/src/styles/tools/_spacing.scss create mode 100644 GUI/src/types/authorities.ts create mode 100644 GUI/src/types/chat.ts create mode 100644 GUI/src/types/common.ts create mode 100644 GUI/src/types/correctedTextTypes.ts create mode 100644 GUI/src/types/dataModels.ts create mode 100644 GUI/src/types/datasetGroups.ts create mode 100644 GUI/src/types/emergencyNotice.ts create mode 100644 GUI/src/types/establishment.ts create mode 100644 GUI/src/types/integration.ts create mode 100644 GUI/src/types/mainNavigation.ts create mode 100644 GUI/src/types/message.ts create mode 100644 GUI/src/types/organizationWorkingTime.ts create mode 100644 GUI/src/types/router.ts create mode 100644 GUI/src/types/service.ts create mode 100644 GUI/src/types/session.ts create mode 100644 GUI/src/types/testModel.ts create mode 100644 GUI/src/types/testModelTypes.ts create mode 100644 GUI/src/types/user.ts create mode 100644 GUI/src/types/userInfo.ts create mode 100644 GUI/src/types/userProfileSettings.ts create mode 100644 GUI/src/types/widgetConfig.ts create mode 100644 GUI/src/utils/commonUtilts.ts create mode 100644 GUI/src/utils/constants.ts create mode 100644 GUI/src/utils/dataTableUtils.ts create mode 100644 GUI/src/utils/endpoints.ts create mode 100644 GUI/src/utils/format-bytes.ts create mode 100644 GUI/src/utils/generateUEID.ts create mode 100644 GUI/src/utils/local-storage-utils.ts create mode 100644 GUI/src/utils/queryKeys.ts create mode 100644 GUI/src/vite-env.d.ts create mode 100644 GUI/translations/en/common.json create mode 100644 GUI/translations/et/common.json create mode 100644 GUI/tsconfig.json create mode 100644 GUI/tsconfig.node.json create mode 100644 GUI/vite.config.ts create mode 100644 GUI/vitePlugin.js create mode 100644 Global-Classifier/GUI/.eslintignore create mode 100644 Global-Classifier/GUI/.eslintrc.js create mode 100644 Global-Classifier/GUI/package.json (to be modified) create mode 100644 Global-Classifier/GUI/src/components/Header/Header.test.tsx diff --git a/.github/workflows/ci-cd-development.yml b/.github/workflows/ci-cd-development.yml index d5b01599..43fe3633 100644 --- a/.github/workflows/ci-cd-development.yml +++ b/.github/workflows/ci-cd-development.yml @@ -4,11 +4,39 @@ on: push: branches: - dev + pull_request: + branches: + - dev jobs: + lint: + name: Run ESLint + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version: '18' + cache: 'npm' + cache-dependency-path: 'GUI/package-lock.json' + + - name: Install dependencies + working-directory: ./GUI + run: npm ci + + - name: Lint code + working-directory: ./GUI + run: npx eslint "src/**/*.{ts,tsx}" --max-warnings=0 + deploy: runs-on: [self-hosted, development] - + if: github.event_name == 'push' + steps: - name: Checkout code uses: actions/checkout@v3 @@ -16,6 +44,7 @@ jobs: - name: Stop and remove existing containers run: | docker compose down + - name: Build & up Docker container run: | docker compose up --build -d \ No 
newline at end of file diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/DSL/Liquibase/changelog/classifier-script-v5-configuration.sql b/DSL/Liquibase/changelog/classifier-script-v5-configuration.sql new file mode 100644 index 00000000..8b3ab1be --- /dev/null +++ b/DSL/Liquibase/changelog/classifier-script-v5-configuration.sql @@ -0,0 +1,15 @@ +-- liquibase formatted sql + +-- changeset kalsara Magamage:classifier-script-v4-changeset1 +CREATE TABLE public.configuration ( + id BIGINT NOT NULL GENERATED BY DEFAULT AS IDENTITY, + key VARCHAR(128), + value VARCHAR(128), + created TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, + deleted BOOLEAN NOT NULL DEFAULT FALSE, + CONSTRAINT configuration_pkey PRIMARY KEY (id) +); + +-- changeset kalsara Magamage:classifier-script-v4-changeset2 +INSERT INTO public.configuration (key, value) +VALUES ('session_length', '120'); diff --git a/DSL/Liquibase/changelog/global-classifier-script-v3-user-management.sql b/DSL/Liquibase/changelog/global-classifier-script-v3-user-management.sql new file mode 100644 index 00000000..91d4cfac --- /dev/null +++ b/DSL/Liquibase/changelog/global-classifier-script-v3-user-management.sql @@ -0,0 +1,41 @@ +-- liquibase formatted sql + +-- changeset kalsara Magamage:classifier-script-v1-changeset1 +CREATE TYPE user_status AS ENUM ('active','deleted'); + +-- changeset kalsara Magamage:classifier-script-v1-changeset2 +CREATE TABLE public."user" ( + id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY, + login VARCHAR(50) NOT NULL, + password_hash VARCHAR(60), + first_name VARCHAR(50), + last_name VARCHAR(50), + id_code VARCHAR(50) NOT NULL, + display_name VARCHAR(50), + status user_status, + csa_title VARCHAR, + csa_email VARCHAR, + created TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT user_pkey PRIMARY KEY (id) +); + +CREATE TABLE public."authority" ( + name VARCHAR(50) PRIMARY KEY +); + +CREATE TABLE public."user_authority" ( + id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY, + user_id VARCHAR(50) NOT NULL, + authority_name VARCHAR[] NOT NULL, + created TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT user_authority_pkey PRIMARY KEY (id) +); + +-- changeset kalsara Magamage:classifier-script-v1-changeset3 + +INSERT INTO public."user" (login,password_hash,first_name,last_name,id_code,display_name,status,csa_title,csa_email) +VALUES ('EE30303039914','ok','classifier','test','EE30303039914','classifier','active','Title','classifier.doe@example.com'); + +INSERT INTO public."user_authority" ( user_id, authority_name) +VALUES ('EE30303039914', ARRAY['ROLE_ADMINISTRATOR', 'ROLE_MODEL_TRAINER'] ); + diff --git a/DSL/Liquibase/changelog/global-classifier-script-v4-authority-data.xml b/DSL/Liquibase/changelog/global-classifier-script-v4-authority-data.xml new file mode 100644 index 00000000..b54c2f3c --- /dev/null +++ b/DSL/Liquibase/changelog/global-classifier-script-v4-authority-data.xml @@ -0,0 +1,17 @@ +<?xml version="1.0" encoding="UTF-8"?> +<databaseChangeLog + xmlns="http://www.liquibase.org/xml/ns/dbchangelog" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog + http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-latest.xsd"> + + <changeSet id="global-classifier-script-v4-changeset1" author="kalsara Magamage"> + <loadData file="data/authority.csv" + relativeToChangelogFile="true" + separator="," + tableName="authority"> + <column name="name" type="STRING"/> + </loadData> + </changeSet> + +</databaseChangeLog> \ No newline at end of file diff --git a/DSL/Liquibase/data/authority.csv b/DSL/Liquibase/data/authority.csv new file mode 100644 index 00000000..c110c607 --- /dev/null +++ b/DSL/Liquibase/data/authority.csv @@ -0,0 +1,3 @@ +name +ROLE_ADMINISTRATOR +ROLE_MODEL_TRAINER diff --git a/DSL/Liquibase/master.yml b/DSL/Liquibase/master.yml index 0861e812..3ce4e35c 100644 --- a/DSL/Liquibase/master.yml +++ b/DSL/Liquibase/master.yml @@ -2,4 +2,11 @@ databaseChangeLog: - include: file:
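# A sketch of how Liquibase consumes the master changelog edited above: the
# include entries run in file order, so the pre-existing v1/v2 scripts are
# followed by v3 (user and authority tables), then the configuration script,
# then the v4 XML that loads authority.csv; each changeset is applied once and
# recorded, so re-runs are no-ops.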
changelog/global-classifier-script-v1-classification-results.sql - include: - file: changelog/global-classifier-script-v2-classification-feedback.sql \ No newline at end of file + file: changelog/global-classifier-script-v2-classification-feedback.sql + - include: + file: changelog/global-classifier-script-v3-user-management.sql + - include: + file: changelog/classifier-script-v5-configuration.sql + - include: + file: changelog/global-classifier-script-v4-authority-data.xml + \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/delete-user.sql b/DSL/Resql/global-classifier/POST/delete-user.sql new file mode 100644 index 00000000..eb8ccade --- /dev/null +++ b/DSL/Resql/global-classifier/POST/delete-user.sql @@ -0,0 +1,36 @@ +WITH active_administrators AS (SELECT user_id + FROM user_authority + WHERE 'ROLE_ADMINISTRATOR' = ANY (authority_name) + AND id IN (SELECT max(id) + FROM user_authority + GROUP BY user_id)), +delete_user AS ( +INSERT +INTO "user" (login, password_hash, first_name, last_name, id_code, display_name, status, created, csa_title, csa_email) +SELECT login, + password_hash, + first_name, + last_name, + id_code, + display_name, + 'deleted', + :created::timestamp with time zone, + csa_title, + csa_email +FROM "user" +WHERE id_code = :userIdCode + AND status <> 'deleted' + AND id IN (SELECT max(id) FROM "user" WHERE id_code = :userIdCode) + AND (1 < (SELECT COUNT(user_id) FROM active_administrators) + OR (1 = (SELECT COUNT(user_id) FROM active_administrators) + AND :userIdCode NOT IN (SELECT user_id FROM active_administrators)))), +delete_authority AS ( +INSERT +INTO user_authority (user_id, authority_name, created) +SELECT :userIdCode as users, ARRAY []::varchar[], :created::timestamp with time zone +FROM user_authority +WHERE 1 < (SELECT COUNT(user_id) FROM active_administrators) + OR (1 = (SELECT COUNT(user_id) FROM active_administrators) + AND :userIdCode NOT IN (SELECT user_id FROM active_administrators)) +GROUP BY users) +SELECT max(status) FROM "user" WHERE id_code = :userIdCode; diff --git a/DSL/Resql/global-classifier/POST/get-configuration.sql b/DSL/Resql/global-classifier/POST/get-configuration.sql new file mode 100644 index 00000000..f03b322e --- /dev/null +++ b/DSL/Resql/global-classifier/POST/get-configuration.sql @@ -0,0 +1,5 @@ +SELECT id, key, value +FROM configuration +WHERE key=:key +AND id IN (SELECT max(id) from configuration GROUP BY key) +AND NOT deleted; diff --git a/DSL/Resql/global-classifier/POST/get-user-role.sql b/DSL/Resql/global-classifier/POST/get-user-role.sql new file mode 100644 index 00000000..39a51f4e --- /dev/null +++ b/DSL/Resql/global-classifier/POST/get-user-role.sql @@ -0,0 +1,10 @@ +SELECT ua.authority_name AS authorities +FROM "user" u + INNER JOIN (SELECT authority_name, user_id + FROM user_authority AS ua + WHERE ua.id IN (SELECT max(id) + FROM user_authority + GROUP BY user_id)) ua ON u.id_code = ua.user_id +WHERE u.id_code = :userIdCode + AND status <> 'deleted' + AND id IN (SELECT max(id) FROM "user" WHERE id_code = :userIdCode) diff --git a/DSL/Resql/global-classifier/POST/get-user-with-roles.sql b/DSL/Resql/global-classifier/POST/get-user-with-roles.sql new file mode 100644 index 00000000..8ef5044c --- /dev/null +++ b/DSL/Resql/global-classifier/POST/get-user-with-roles.sql @@ -0,0 +1,15 @@ +SELECT DISTINCT u.login, + u.first_name, + u.last_name, + u.id_code, + u.display_name, + u.csa_title, + u.csa_email, + ua.authority_name AS authorities +FROM "user" u + LEFT JOIN (SELECT authority_name, user_id + FROM 
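-- A minimal sketch of the row-versioning convention behind the Resql queries
-- above (illustrative SQL, not one of the patch's files): "user" and
-- user_authority are append-only, so an edit or delete inserts a new row and
-- the row with max(id) per user is treated as current, e.g.
--
--   SELECT * FROM "user"
--   WHERE id_code = :userIdCode
--     AND id IN (SELECT max(id) FROM "user" WHERE id_code = :userIdCode);
--
-- delete-user.sql builds on this by writing a 'deleted' row, and its
-- active_administrators CTE blocks the write when the target is the last
-- remaining active administrator.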
user_authority AS ua + WHERE ua.id IN (SELECT max(id) + FROM user_authority + GROUP BY user_id)) ua ON u.id_code = ua.user_id +WHERE login = :login; diff --git a/DSL/Resql/global-classifier/POST/get-user.sql b/DSL/Resql/global-classifier/POST/get-user.sql new file mode 100644 index 00000000..18bef7ff --- /dev/null +++ b/DSL/Resql/global-classifier/POST/get-user.sql @@ -0,0 +1,5 @@ +SELECT id_code +FROM "user" +WHERE id_code = :userIdCode + AND status <> 'deleted' + AND id IN (SELECT max(id) FROM "user" WHERE id_code = :userIdCode) \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/get-users-with-roles-by-role.sql b/DSL/Resql/global-classifier/POST/get-users-with-roles-by-role.sql new file mode 100644 index 00000000..50ec5199 --- /dev/null +++ b/DSL/Resql/global-classifier/POST/get-users-with-roles-by-role.sql @@ -0,0 +1,41 @@ +SELECT u.login, + u.first_name, + u.last_name, + u.id_code AS userIdCode, + u.display_name, + u.csa_title, + u.csa_email, + ua.authority_name AS authorities, + CEIL(COUNT(*) OVER() / :page_size::DECIMAL) AS total_pages +FROM "user" u +LEFT JOIN ( + SELECT authority_name, user_id, ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY id DESC) AS rn + FROM user_authority AS ua + WHERE authority_name && ARRAY [ :roles ]::character varying array + AND ua.id IN ( + SELECT max(id) + FROM user_authority + GROUP BY user_id + ) +) ua ON u.id_code = ua.user_id +WHERE u.status <> 'deleted' + AND array_length(authority_name, 1) > 0 + AND u.id IN ( + SELECT max(id) + FROM "user" + GROUP BY id_code + ) +ORDER BY + CASE WHEN :sorting = 'name asc' THEN u.first_name END ASC, + CASE WHEN :sorting = 'name desc' THEN u.first_name END DESC, + CASE WHEN :sorting = 'idCode asc' THEN u.id_code END ASC, + CASE WHEN :sorting = 'idCode desc' THEN u.id_code END DESC, + CASE WHEN :sorting = 'Role asc' THEN ua.authority_name END ASC, + CASE WHEN :sorting = 'Role desc' THEN ua.authority_name END DESC, + CASE WHEN :sorting = 'displayName asc' THEN u.display_name END ASC, + CASE WHEN :sorting = 'displayName desc' THEN u.display_name END DESC, + CASE WHEN :sorting = 'csaTitle asc' THEN u.csa_title END ASC, + CASE WHEN :sorting = 'csaTitle desc' THEN u.csa_title END DESC, + CASE WHEN :sorting = 'csaEmail asc' THEN u.csa_email END ASC, + CASE WHEN :sorting = 'csaEmail desc' THEN u.csa_email END DESC +OFFSET ((GREATEST(:page, 1) - 1) * :page_size) LIMIT :page_size; diff --git a/DSL/Resql/global-classifier/POST/insert-user-role.sql b/DSL/Resql/global-classifier/POST/insert-user-role.sql new file mode 100644 index 00000000..e2bfe3b4 --- /dev/null +++ b/DSL/Resql/global-classifier/POST/insert-user-role.sql @@ -0,0 +1,2 @@ +INSERT INTO user_authority (user_id, authority_name, created) +VALUES (:userIdCode, ARRAY [ :roles ], :created::timestamp with time zone); \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/insert-user.sql b/DSL/Resql/global-classifier/POST/insert-user.sql new file mode 100644 index 00000000..0fd7c12b --- /dev/null +++ b/DSL/Resql/global-classifier/POST/insert-user.sql @@ -0,0 +1,2 @@ +INSERT INTO "user" (login, first_name, last_name, display_name, password_hash, id_code, status, created, csa_title, csa_email) +VALUES (:userIdCode, :firstName, :lastName, :displayName, :displayName, :userIdCode, (:status)::user_status, :created::timestamp with time zone, :csaTitle, :csaEmail); diff --git a/DSL/Resql/global-classifier/POST/update-user.sql b/DSL/Resql/global-classifier/POST/update-user.sql new file mode 100644 index 00000000..688e8df7 --- /dev/null 
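-- Pagination sketch for get-users-with-roles-by-role.sql above (illustrative,
-- assuming a 1-based :page and :page_size > 0):
--
--   total_pages = CEIL(count_of_matching_rows / :page_size)  -- via COUNT(*) OVER()
--   OFFSET (GREATEST(:page, 1) - 1) * :page_size LIMIT :page_size
--
-- The :sorting string (e.g. 'name asc', 'idCode desc') selects one CASE branch
-- in the ORDER BY; a value that matches no branch leaves the order undefined.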
+++ b/DSL/Resql/global-classifier/POST/update-user.sql @@ -0,0 +1,16 @@ +INSERT INTO "user" (id_code, login, password_hash, first_name, last_name, display_name, status, created, csa_title, csa_email) +SELECT + :userIdCode, + login, + password_hash, + :firstName, + :lastName, + :displayName, + :status::user_status, + :created::timestamp with time zone, + :csaTitle, + :csaEmail +FROM "user" +WHERE id = ( + SELECT MAX(id) FROM "user" WHERE id_code = :userIdCode +); diff --git a/DSL/Ruuter.private/global-classifier/GET/.guard b/DSL/Ruuter.private/global-classifier/GET/.guard new file mode 100644 index 00000000..9d21179b --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/GET/.guard @@ -0,0 +1,28 @@ +check_for_cookie: + switch: + - condition: ${incoming.headers == null || incoming.headers.cookie == null} + next: guard_fail + next: authenticate + +authenticate: + template: "[#GLOBAL_CLASSIFIER_PROJECT_LAYER]/check-user-authority" + requestType: templates + headers: + cookie: ${incoming.headers.cookie} + result: authority_result + +check_authority_result: + switch: + - condition: ${authority_result !== "false"} + next: guard_success + next: guard_fail + +guard_success: + return: "success" + status: 200 + next: end + +guard_fail: + return: "unauthorized" + status: 401 + next: end diff --git a/DSL/Ruuter.private/global-classifier/GET/accounts/logout.yml b/DSL/Ruuter.private/global-classifier/GET/accounts/logout.yml new file mode 100644 index 00000000..8284c80b --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/GET/accounts/logout.yml @@ -0,0 +1,63 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'LOGOUT'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + headers: + - field: cookie + type: string + description: "Cookie field" + +get_user_info: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_TIM]/jwt/custom-jwt-userinfo" + contentType: plaintext + headers: + cookie: ${incoming.headers.cookie} + plaintext: "customJwtCookie" + result: res + next: check_user_info_response + +check_user_info_response: + switch: + - condition: ${200 <= res.response.statusCodeValue && res.response.statusCodeValue < 300} + next: blacklistCustomJwt + next: return_bad_request + +blacklistCustomJwt: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_TIM]/jwt/custom-jwt-blacklist" + contentType: plaintext + headers: + cookie: ${incoming.headers.cookie} + plaintext: "customJwtCookie" + result: blacklist_res + next: assign_cookie + +assign_cookie: + assign: + setCookie: + customJwtCookie: null + Domain: "[#DOMAIN]" + Max-Age: 0 + Secure: true + HttpOnly: true + SameSite: "Lax" + next: return_result + +return_result: + headers: + Set-Cookie: ${setCookie} + return: "Logged Out Successfully" + next: end + +return_bad_request: + return: "error: bad request" + status: 400 + next: end diff --git a/DSL/Ruuter.private/global-classifier/GET/accounts/user-role.yml b/DSL/Ruuter.private/global-classifier/GET/accounts/user-role.yml new file mode 100644 index 00000000..eb0ea7f2 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/GET/accounts/user-role.yml @@ -0,0 +1,53 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'USER-ROLE'" + method: get + accepts: json + returns: json + namespace: global-classifier + allowlist: + headers: + - field: cookie + type: string + description: "Cookie field" + +get_user_info: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_TIM]/jwt/custom-jwt-userinfo" +
contentType: plaintext + headers: + cookie: ${incoming.headers.cookie} + plaintext: + "customJwtCookie" + result: res + next: check_user_info_response + +check_user_info_response: + switch: + - condition: ${200 <= res.response.statusCodeValue && res.response.statusCodeValue < 300} + next: assignIdCode + next: returnNotFound + +assignIdCode: + assign: + idCode: ${res.response.body.idCode} + next: getUserRole + +getUserRole: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-user-role" + body: + userIdCode: ${idCode} + result: roles_res + next: returnSuccess + +returnSuccess: + return: ${roles_res.response.body?.[0]?.authorities ?? []} + next: end + +returnNotFound: + return: "error: not found" + next: end diff --git a/DSL/Ruuter.private/global-classifier/GET/auth/jwt/extend.yml b/DSL/Ruuter.private/global-classifier/GET/auth/jwt/extend.yml new file mode 100644 index 00000000..c98c8cd9 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/GET/auth/jwt/extend.yml @@ -0,0 +1,41 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'EXTEND'" + method: get + accepts: json + returns: json + namespace: global-classifier + allowlist: + headers: + - field: cookie + type: string + description: "Cookie field" + +extend_cookie: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_TIM]/jwt/custom-jwt-extend" + contentType: plaintext + headers: + cookie: ${incoming.headers.cookie} + plaintext: + "customJwtCookie" + result: cookie_result + next: assign_cookie + +assign_cookie: + assign: + setCookie: + customJwtCookie: ${cookie_result.response.body.token} + Domain: "[#DOMAIN]" + Secure: true + HttpOnly: true + SameSite: "Lax" + next: return_value + +return_value: + headers: + Set-Cookie: ${setCookie} + return: ${cookie_result.response.body.token} + next: end diff --git a/DSL/Ruuter.private/global-classifier/GET/auth/jwt/userinfo.yml b/DSL/Ruuter.private/global-classifier/GET/auth/jwt/userinfo.yml new file mode 100644 index 00000000..ee4b5f3f --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/GET/auth/jwt/userinfo.yml @@ -0,0 +1,27 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'USERINFO'" + method: get + accepts: json + returns: json + namespace: global-classifier + allowlist: + headers: + - field: cookie + type: string + description: "Cookie field" + +get_user_info: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_TIM]/jwt/custom-jwt-userinfo" + contentType: plaintext + headers: + cookie: ${incoming.headers.cookie} + plaintext: + "customJwtCookie" + result: res + +return_result: + return: ${res.response.body} diff --git a/DSL/Ruuter.private/global-classifier/POST/.guard b/DSL/Ruuter.private/global-classifier/POST/.guard new file mode 100644 index 00000000..9d21179b --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/.guard @@ -0,0 +1,28 @@ +check_for_cookie: + switch: + - condition: ${incoming.headers == null || incoming.headers.cookie == null} + next: guard_fail + next: authenticate + +authenticate: + template: "[#GLOBAL_CLASSIFIER_PROJECT_LAYER]/check-user-authority" + requestType: templates + headers: + cookie: ${incoming.headers.cookie} + result: authority_result + +check_authority_result: + switch: + - condition: ${authority_result !== "false"} + next: guard_success + next: guard_fail + +guard_success: + return: "success" + status: 200 + next: end + +guard_fail: + return: "unauthorized" + status: 401 + next: end diff --git a/DSL/Ruuter.private/global-classifier/POST/accounts/.guard
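# A summary sketch of the cookie gate the .guard files above implement: a
# request with no cookie header fails immediately with 401; otherwise the
# cookie is forwarded to the check-user-authority template (or its -admin
# variant below), and any result other than "false" passes the guard with
# 200 "success". An authenticated call therefore looks like (illustrative,
# host placeholder assumed):
#
#   curl -H 'cookie: customJwtCookie=<jwt>' https://<ruuter-private-host>/global-classifier/accounts/user-role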
b/DSL/Ruuter.private/global-classifier/POST/accounts/.guard new file mode 100644 index 00000000..be3aa511 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/accounts/.guard @@ -0,0 +1,28 @@ +check_for_cookie: + switch: + - condition: ${incoming.headers == null || incoming.headers.cookie == null} + next: guard_fail + next: authenticate + +authenticate: + template: "[#GLOBAL_CLASSIFIER_PROJECT_LAYER]/check-user-authority-admin" + requestType: templates + headers: + cookie: ${incoming.headers.cookie} + result: authority_result + +check_authority_result: + switch: + - condition: ${authority_result !== "false"} + next: guard_success + next: guard_fail + +guard_success: + return: "success" + status: 200 + next: end + +guard_fail: + return: "unauthorized" + status: 401 + next: end diff --git a/DSL/Ruuter.private/global-classifier/POST/accounts/add.yml b/DSL/Ruuter.private/global-classifier/POST/accounts/add.yml new file mode 100644 index 00000000..1959c935 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/accounts/add.yml @@ -0,0 +1,89 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'ADD'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + body: + - field: csaTitle + type: string + description: "Body field 'csaTitle'" + - field: csa_email + type: string + description: "Body field 'csa_email'" + - field: firstName + type: string + description: "Body field 'firstName'" + - field: lastName + type: string + description: "Body field 'lastName'" + - field: roles + type: array + description: "Body field 'roles'" + - field: userIdCode + type: string + description: "Body field 'userIdCode'" + +extractRequestData: + assign: + firstName: ${incoming.body.firstName} + lastName: ${incoming.body.lastName} + userIdCode: ${incoming.body.userIdCode} + displayName: ${incoming.body.firstName} + csaTitle: ${incoming.body.csaTitle} + csa_email: ${incoming.body.csa_email} + roles: ${incoming.body.roles} + +getUser: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-user" + body: + userIdCode: ${userIdCode} + result: res + next: checkIfUserExists + +checkIfUserExists: + switch: + - condition: "${res.response.body.length > 0}" + next: return_exists + next: addUser + +addUser: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/insert-user" + body: + created: ${new Date().toISOString()} + status: "active" + firstName: ${firstName} + lastName: ${lastName} + userIdCode: ${userIdCode} + displayName: ${displayName} + csaTitle: ${csaTitle} + csaEmail: ${csa_email} + result: add_user_res + next: addRoles + +addRoles: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/insert-user-role" + body: + userIdCode: ${userIdCode} + roles: ${roles} + created: ${new Date().toISOString()} + result: add_roles_res + next: return_result + +return_result: + return: "User added successfully" + next: end + +return_exists: + return: "error: user already exists" + status: 400 + next: end diff --git a/DSL/Ruuter.private/global-classifier/POST/accounts/delete.yml b/DSL/Ruuter.private/global-classifier/POST/accounts/delete.yml new file mode 100644 index 00000000..e1579062 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/accounts/delete.yml @@ -0,0 +1,29 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'DELETE'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + body: + - field: userIdCode + type: string + description:
"Body field 'userIdCode'" + +extractRequestData: + assign: + userId: ${incoming.body.userIdCode} + +setConfigurationValue: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/delete-user" + body: + userIdCode: ${userId} + created: ${new Date().toISOString()} + result: res + +return_result: + return: ${res.response.body} diff --git a/DSL/Ruuter.private/global-classifier/POST/accounts/edit.yml b/DSL/Ruuter.private/global-classifier/POST/accounts/edit.yml new file mode 100644 index 00000000..455a5ab1 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/accounts/edit.yml @@ -0,0 +1,94 @@ +declaration: + call: declare + version: 0.1 + description: "Decription placeholder for 'EDIT'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + body: + - field: csaTitle + type: string + description: "Body field 'csaTitle'" + - field: csa_email + type: string + description: "Body field 'csa_email'" + - field: displayName + type: string + description: "Body field 'displayName'" + - field: firstName + type: string + description: "Body field 'firstName'" + - field: lastName + type: string + description: "Body field 'lastName'" + - field: roles + type: array + description: "Body field 'roles'" + - field: userIdCode + type: string + description: "Body field 'userIdCode'" + +extractRequestData: + assign: + firstName: ${incoming.body.firstName} + lastName: ${incoming.body.lastName} + userIdCode: ${incoming.body.userIdCode} + displayName: ${incoming.body.displayName} + csaTitle: ${incoming.body.csaTitle} + csa_email: ${incoming.body.csa_email} + roles: ${incoming.body.roles} + +getUser: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-user" + body: + userIdCode: ${userIdCode} + result: res + next: checkIfUserExists + +checkIfUserExists: + switch: + - condition: "${res.response.body.length > 0}" + next: updateUser + next: return_not_exists + +updateUser: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/update-user" + body: + created: ${new Date().toISOString()} + status: "active" + firstName: ${firstName} + lastName: ${lastName} + userIdCode: ${userIdCode} + displayName: ${displayName} + csaTitle: ${csaTitle} + csaEmail: ${csa_email} + result: add_user_res + next: updateRoles + +updateRoles: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/insert-user-role" + body: + userIdCode: ${userIdCode} + roles: ${roles} + created: ${new Date().toISOString()} + result: add_roles_res + next: return_result + +return_result: + return: "User updated successfully" + status: 200 + next: end + +return_not_exists: + return: "error: user does not exist" + status: 400 + next: end + diff --git a/DSL/Ruuter.private/global-classifier/POST/accounts/exists.yml b/DSL/Ruuter.private/global-classifier/POST/accounts/exists.yml new file mode 100644 index 00000000..5998db49 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/accounts/exists.yml @@ -0,0 +1,40 @@ +declaration: + call: declare + version: 0.1 + description: "Decription placeholder for 'EXISTS'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + body: + - field: userIdCode + type: string + description: "Body field 'userIdCode'" + +extractRequestData: + assign: + userId: ${incoming.body.userIdCode} + +getUser: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-user" + body: + userIdCode: ${userId} + result: res + next: checkIfUserExists + +checkIfUserExists: + switch: + - condition: "${res.response.body.length > 0}" + next: 
return_exists + next: return_not_exists + +return_exists: + return: "true" + next: end + +return_not_exists: + return: "false" + next: end diff --git a/DSL/Ruuter.private/global-classifier/POST/accounts/users.yml b/DSL/Ruuter.private/global-classifier/POST/accounts/users.yml new file mode 100644 index 00000000..d9c8960f --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/accounts/users.yml @@ -0,0 +1,39 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'USERS'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + body: + - field: page + type: number + description: "Body field 'page'" + - field: page_size + type: number + description: "Body field 'page_size'" + - field: sorting + type: string + description: "Body field 'sorting'" + +getUsers: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-users-with-roles-by-role" + body: + page: ${incoming.body.page} + page_size: ${incoming.body.page_size} + sorting: ${incoming.body.sorting} + roles: + [ + "ROLE_ADMINISTRATOR", + "ROLE_MODEL_TRAINER" + ] + result: res + next: return_result + +return_result: + return: ${res.response.body} + next: end diff --git a/DSL/Ruuter.private/global-classifier/TEMPLATES/check-user-authority-admin.yml b/DSL/Ruuter.private/global-classifier/TEMPLATES/check-user-authority-admin.yml new file mode 100644 index 00000000..8864c184 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/TEMPLATES/check-user-authority-admin.yml @@ -0,0 +1,52 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'CHECK-USER-AUTHORITY-ADMIN'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + headers: + - field: cookie + type: string + description: "Cookie field" + +get_cookie_info: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_TIM]/jwt/custom-jwt-userinfo" + contentType: plaintext + headers: + cookie: ${incoming.headers.cookie} + plaintext: "customJwtCookie" + result: res + next: check_cookie_info_response + +check_cookie_info_response: + switch: + - condition: ${200 <= res.response.statusCodeValue && res.response.statusCodeValue < 300} + next: check_user_authority + next: return_bad_request + +check_user_authority: + switch: + - condition: ${res.response.body === null} + next: return_unauthorized + - condition: ${res.response.body.authorities.includes("ROLE_ADMINISTRATOR")} + next: return_authorized + next: return_unauthorized + +return_authorized: + return: ${res.response.body} + next: end + +return_unauthorized: + status: 401 + return: false + next: end + +return_bad_request: + status: 400 + return: false + next: end diff --git a/DSL/Ruuter.private/global-classifier/TEMPLATES/check-user-authority.yml b/DSL/Ruuter.private/global-classifier/TEMPLATES/check-user-authority.yml new file mode 100644 index 00000000..80708e9b --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/TEMPLATES/check-user-authority.yml @@ -0,0 +1,52 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'CHECK-USER-AUTHORITY'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + headers: + - field: cookie + type: string + description: "Cookie field" + +get_cookie_info: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_TIM]/jwt/custom-jwt-userinfo" + contentType: plaintext + headers: + cookie: ${incoming.headers.cookie} + plaintext: "customJwtCookie" + result: res + next: check_cookie_info_response +
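# A sketch of the contract for the check-user-authority-admin template above:
# it returns the TIM userinfo body when the cookie's authorities include
# ROLE_ADMINISTRATOR, and false with status 401 (or 400 when TIM rejects the
# cookie) otherwise; the .guard files treat any result other than "false" as
# authorized. check-user-authority, continuing below, widens the accepted
# roles to ROLE_ADMINISTRATOR or ROLE_MODEL_TRAINER.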
+check_cookie_info_response: + switch: + - condition: ${200 <= res.response.statusCodeValue && res.response.statusCodeValue < 300} + next: check_user_authority + next: return_bad_request + +check_user_authority: + switch: + - condition: ${res.response.body === null} + next: return_unauthorized + - condition: ${res.response.body.authorities.includes("ROLE_ADMINISTRATOR") || res.response.body.authorities.includes("ROLE_MODEL_TRAINER")} + next: return_authorized + next: return_unauthorized + +return_authorized: + return: ${res.response.body} + next: end + +return_unauthorized: + status: 401 + return: false + next: end + +return_bad_request: + status: 400 + return: false + next: end diff --git a/DSL/Ruuter.public/global-classifier/POST/auth/login.yml b/DSL/Ruuter.public/global-classifier/POST/auth/login.yml new file mode 100644 index 00000000..d0809827 --- /dev/null +++ b/DSL/Ruuter.public/global-classifier/POST/auth/login.yml @@ -0,0 +1,79 @@ +declaration: + call: declare + version: 0.1 + description: "Description placeholder for 'LOGIN'" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + body: + - field: login + type: string + description: "Body field 'login'" + - field: password + type: string + description: "Body field 'password'" + +extractRequestData: + assign: + login: ${incoming.body.login} + password: ${incoming.body.password} + next: getUserWithRole + +getUserWithRole: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-user-with-roles" + body: + login: ${login} + password: ${password} + result: user_result + next: check_user_result + +check_user_result: + switch: + - condition: "${user_result.response.body.length > 0}" + next: get_session_length + next: return_user_not_found + +get_session_length: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-configuration" + body: + key: "session_length" + result: session_result + next: generate_cookie + +generate_cookie: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_TIM]/jwt/custom-jwt-generate" + body: + JWTName: "customJwtCookie" + expirationInMinutes: ${session_result.response.body[0]?.value ??
'120'} + content: ${user_result.response.body[0]} + result: cookie_result + next: assign_cookie + +assign_cookie: + assign: + setCookie: + customJwtCookie: ${cookie_result.response.body.token} + Domain: "[#DOMAIN]" + Secure: true + HttpOnly: true + SameSite: "Lax" + next: return_value + +return_value: + headers: + Set-Cookie: ${setCookie} + return: ${cookie_result.response.body.token} + next: end + +return_user_not_found: + status: 400 + return: "User Not Found" + next: end diff --git a/GUI/.dockerignore b/GUI/.dockerignore new file mode 100644 index 00000000..ab4f96a1 --- /dev/null +++ b/GUI/.dockerignore @@ -0,0 +1,7 @@ +node_modules +npm-debug.log +build +.git +*.md +.gitignore +.env.development diff --git a/GUI/.env.development b/GUI/.env.development new file mode 100644 index 00000000..7ff4d8bb --- /dev/null +++ b/GUI/.env.development @@ -0,0 +1,8 @@ +REACT_APP_RUUTER_API_URL=http://localhost:8086 +REACT_APP_RUUTER_PRIVATE_API_URL=http://localhost:8088 +REACT_APP_EXTERNAL_API_URL=http://localhost:8000 +REACT_APP_CUSTOMER_SERVICE_LOGIN=http://localhost:3004/et/dev-auth +REACT_APP_SERVICE_ID=conversations,settings,monitoring +REACT_APP_NOTIFICATION_NODE_URL=http://localhost:4040 +REACT_APP_CSP=upgrade-insecure-requests; default-src 'self'; font-src 'self' data:; img-src 'self' data:; script-src 'self' 'unsafe-eval' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; object-src 'none'; connect-src 'self' http://localhost:8086 http://localhost:8088 http://localhost:8085 http://localhost:4040; +REACT_APP_ENABLE_HIDDEN_FEATURES=TRUE \ No newline at end of file diff --git a/GUI/.eslintrc.json b/GUI/.eslintrc.json new file mode 100644 index 00000000..5e603ecd --- /dev/null +++ b/GUI/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "react-app" +} diff --git a/GUI/.gitignore b/GUI/.gitignore new file mode 100644 index 00000000..d79b5ca1 --- /dev/null +++ b/GUI/.gitignore @@ -0,0 +1,30 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# testing +/coverage + +# production +/build + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/GUI/.prettierignore b/GUI/.prettierignore new file mode 100644 index 00000000..3c3629e6 --- /dev/null +++ b/GUI/.prettierignore @@ -0,0 +1 @@ +node_modules diff --git a/GUI/.prettierrc b/GUI/.prettierrc new file mode 100644 index 00000000..0a725205 --- /dev/null +++ b/GUI/.prettierrc @@ -0,0 +1,6 @@ +{ + "trailingComma": "es5", + "tabWidth": 2, + "semi": true, + "singleQuote": true +} diff --git a/GUI/Dockerfile.dev b/GUI/Dockerfile.dev new file mode 100644 index 00000000..48b7890e --- /dev/null +++ b/GUI/Dockerfile.dev @@ -0,0 +1,14 @@ +FROM node:22.0.0-alpine AS image +WORKDIR /app +COPY ./package.json . + +FROM image AS build +RUN npm install --legacy-peer-deps --mode=development +COPY . . +RUN ./node_modules/.bin/vite build --mode=development + +EXPOSE 3001 + +ENV REACT_APP_ENABLE_HIDDEN_FEATURES TRUE + +CMD ["npm", "run", "dev"] diff --git a/GUI/docker-compose.yml b/GUI/docker-compose.yml new file mode 100644 index 00000000..87d6970c --- /dev/null +++ b/GUI/docker-compose.yml @@ -0,0 +1,10 @@ +version: "3.9" +services: + buerokratt_chatbot: + container_name: buerokratt_classifier + build: + context: . 
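# A sketch of the session flow implemented by login.yml above: the login is
# resolved via get-user-with-roles, the session length in minutes is read from
# the configuration table (falling back to '120'), TIM's custom-jwt-generate
# wraps the user record in a JWT, and the response sets
# customJwtCookie=<token>; Domain=[#DOMAIN]; Secure; HttpOnly; SameSite=Lax.
# Every later call through the private .guard directories must carry this
# cookie.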
+ target: web + entrypoint: "/opt/buerokratt-chatbot/rebuild.sh" + ports: + - '3001:3001' diff --git a/GUI/entrypoint.sh b/GUI/entrypoint.sh new file mode 100644 index 00000000..636848f7 --- /dev/null +++ b/GUI/entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +# Replace environment variables in the Nginx configuration template +envsubst '$BASE_URL $REACT_APP_RUUTER_API_URL $REACT_APP_RUUTER_V1_PRIVATE_API_URL $REACT_APP_RUUTER_V2_PRIVATE_API_URL $REACT_APP_CUSTOMER_SERVICE_LOGIN $CHOKIDAR_USEPOLLING $PORT' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf + +# Start the Nginx server +nginx -g "daemon off;" diff --git a/GUI/i18n.ts b/GUI/i18n.ts new file mode 100644 index 00000000..6a4593d0 --- /dev/null +++ b/GUI/i18n.ts @@ -0,0 +1,26 @@ +import i18n from 'i18next'; +import { initReactI18next } from 'react-i18next'; +import LanguageDetector from 'i18next-browser-languagedetector'; + +import commonEN from './translations/en/common.json'; +import commonET from './translations/et/common.json'; + +i18n + .use(LanguageDetector) + .use(initReactI18next) + .init({ + debug: import.meta.env.MODE === 'development', + fallbackLng: 'et', + supportedLngs: ['et','en'], + resources: { + en: { + common: commonEN, + }, + et: { + common: commonET, + }, + }, + defaultNS: 'common', + }); + +export default i18n; diff --git a/GUI/index.html b/GUI/index.html new file mode 100644 index 00000000..047cff35 --- /dev/null +++ b/GUI/index.html @@ -0,0 +1,14 @@ +<!DOCTYPE html> +<html lang="en"> + <head> + <meta charset="UTF-8" /> + <link rel="icon" type="image/x-icon" href="/favicon.ico" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0" /> + <title>Bürokratt</title> + </head> + <body> + <div id="root"></div> + <script type="module" src="/src/main.tsx"></script> + </body> +</html> + diff --git a/GUI/package-lock.json b/GUI/package-lock.json new file mode 100644 index 00000000..436ec9c4 --- /dev/null +++ b/GUI/package-lock.json @@ -0,0 +1,15860 @@ +{ + "name": "byk-training-module-gui", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "byk-training-module-gui", + "version": "0.0.0", + "dependencies": { + "@buerokratt-ria/styles": "^0.0.1", + "@fontsource/roboto": "^4.5.8", + "@formkit/auto-animate": "^1.0.0-beta.5", + "@fortaine/fetch-event-source": "^3.0.6", + "@radix-ui/react-accessible-icon": "^1.0.1", + "@radix-ui/react-collapsible": "^1.0.1", + "@radix-ui/react-dialog": "^1.0.2", + "@radix-ui/react-popover": "^1.0.2", + "@radix-ui/react-progress": "^1.1.0", + "@radix-ui/react-select": "^1.1.2", + "@radix-ui/react-switch": "^1.0.1", + "@radix-ui/react-tabs": "^1.0.1", + "@radix-ui/react-toast": "^1.1.2", + "@radix-ui/react-tooltip": "^1.0.2", + "@tanstack/match-sorter-utils": "^8.7.2", + "@tanstack/react-query": "^4.36.1", + "@tanstack/react-table": "^8.7.4", + "axios": "^1.2.1", + "clsx": "^1.2.1", + "date-fns": "^2.29.3", + "downshift": "^7.0.5", + "esbuild": "^0.19.5", + "formik": "^2.4.6", + "framer-motion": "^8.5.5", + "howler": "^2.2.4", + "i18next": "^22.4.5", + "i18next-browser-languagedetector": "^7.0.1", + "linkify-react": "^4.1.1", + "linkifyjs": "^4.1.1", + "lodash": "^4.17.21", + "moment": "^2.30.1", + "react": "^18.2.0", + "react-color": "^2.19.3", + "react-cookie": "^4.1.1", + "react-datepicker": "^4.8.0", + "react-dnd": "^16.0.1", + "react-dnd-html5-backend": "^16.0.1", + "react-dom": "^18.2.0", + "react-hook-form": "^7.52.1", + "react-i18next": "^12.1.1", + "react-icons": "^4.10.1", + "react-idle-timer": "^5.5.2", + "react-modal": "^3.16.1", + "react-redux": "^8.1.1", + "react-router-dom": "^6.5.0", + "react-select": "^5.7.4", + "react-text-selection-popover": "^2.0.2", + "react-textarea-autosize": "^8.4.0", + "reactflow": "^11.4.0", + "regexify-string": "^1.0.19", + "rxjs": "^7.8.1", + "timeago.js": "^4.0.2", + "usehooks-ts": "^2.9.1", + "uuid": "^9.0.0", + "yup": "^1.4.0", + "zustand": "^4.4.4" + }, + "devDependencies": { + "@types/howler": "^2.2.11", + "@types/lodash": "^4.14.191", + "@types/lodash.debounce": "^4.0.7", + "@types/node": "^18.11.17", + "@types/react": "^18.0.26", + "@types/react-color": "^3.0.6", + "@types/react-datepicker": "^4.8.0", + "@types/react-dom": "^18.0.9", + "@types/uuid": "^9.0.2", + "@typescript-eslint/eslint-plugin": "^8.32.1", + "@typescript-eslint/parser": "^8.32.1", + "@vitejs/plugin-react": "^3.0.0", + "eslint": "^8.57.1", + "eslint-config-react-app": "^7.0.1", + "eslint-plugin-import": "^2.31.0", + "eslint-plugin-jsx-a11y": "^6.10.2", + "eslint-plugin-react": "^7.37.5", + "eslint-plugin-react-hooks": "^5.2.0", + "eslint-plugin-typescript": "^0.14.0", + "mocksse": "^1.0.4", + "msw": "^0.49.2", + "prettier": "^2.8.1", + "sass": "^1.57.0", + "typescript": "^4.9.3", + "vite": "^4.0.0", + "vite-plugin-env-compatible": "^1.1.1", + "vite-plugin-svgr": "^2.4.0", + "vite-plugin-transform": "^2.0.1", + "vite-tsconfig-paths": "^4.0.3" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, +
"node_modules/@babel/code-frame": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", + "dependencies": { + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.7.tgz", + "integrity": "sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.7.tgz", + "integrity": "sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==", + "dev": true, + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helpers": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/template": "^7.24.7", + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/eslint-parser": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/eslint-parser/-/eslint-parser-7.24.7.tgz", + "integrity": "sha512-SO5E3bVxDuxyNxM5agFv480YA2HO6ohZbGxbazZdIk3KQOPOGVNw6q78I9/lbviIf95eq6tPozeYnJLbjnC8IA==", + "dev": true, + "dependencies": { + "@nicolo-ribaudo/eslint-scope-5-internals": "5.1.1-v1", + "eslint-visitor-keys": "^2.1.0", + "semver": "^6.3.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || >=14.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.11.0", + "eslint": "^7.5.0 || ^8.0.0 || ^9.0.0" + } + }, + "node_modules/@babel/eslint-parser/node_modules/eslint-visitor-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", + "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/@babel/generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.7.tgz", + "integrity": "sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==", + "dependencies": { + "@babel/types": "^7.24.7", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.24.7.tgz", + "integrity": "sha512-BaDeOonYvhdKw+JoMVkAixAAJzG2jVPIwWoKBPdYuY9b452e2rPuI9QPYh3KpofZ3pW2akOmwZLOiOsHMiqRAg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.24.7.tgz", + "integrity": "sha512-xZeCVVdwb4MsDBkkyZ64tReWYrLRHlMN72vP7Bdm3OUOuyFZExhsHUUnuWnm2/XOlAJzR0LfPpB56WXZn0X/lA==", + "dev": true, + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.24.7.tgz", + "integrity": "sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.7.tgz", + "integrity": "sha512-kTkaDl7c9vO80zeX1rJxnuRpEsD5tA81yh11X1gQo+PhSti3JS+7qeZo9U4RHobKRiFPKaGK3svUAeb8D0Q7eg==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.7", + "@babel/helper-optimise-call-expression": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.24.7.tgz", + "integrity": "sha512-03TCmXy2FtXJEZfbXDTSqq1fRJArk7lX9DOFC/47VthYcxyIOx+eXQmdo6DOQvrbpIix+KfXwvuXdFDZHxt+rA==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz", + "integrity": "sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz", + "integrity": "sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz", + "integrity": "sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==", + "dependencies": { + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.7.tgz", + "integrity": "sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.7.tgz", + "integrity": "sha512-LGeMaf5JN4hAT471eJdBs/GK1DoYIJ5GCtZN/EsL6KUiiDZOvO/eKE11AMZJa2zP4zk4qe9V2O/hxAmkRc8p6w==", + "dev": true, + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.24.7.tgz", + "integrity": "sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.24.7.tgz", + "integrity": "sha512-jKiTsW2xmWwxT1ixIdfXUZp+P5yURx2suzLZr5Hi64rURpDYdMW0pv+Uf17EYk2Rd428Lx4tLsnjGJzYKDM/6A==", + "dev": true, + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.7.tgz", + "integrity": "sha512-Rq76wjt7yz9AAc1KnlRKNAi/dMSVWgDRx43FHoJEbcYU6xOWaE2dVPwcdTukJrjxS65GITyfbvEYHvkirZ6uEg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.24.7.tgz", + "integrity": "sha512-9pKLcTlZ92hNZMQfGCHImUpDOlAgkkpqalWEeftW5FBya75k8Li2ilerxkM/uBEj01iBZXcCIB/bwvDYgWyibA==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-environment-visitor": 
"^7.24.7", + "@babel/helper-wrap-function": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.24.7.tgz", + "integrity": "sha512-qTAxxBM81VEyoAY0TtLrx1oAEJc09ZK67Q9ljQToqCnA+55eNwCORaxlKyu+rNfX86o8OXRUSNUnrtsAZXM9sg==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.7", + "@babel/helper-optimise-call-expression": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", + "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", + "dev": true, + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.24.7.tgz", + "integrity": "sha512-IO+DLT3LQUElMbpzlatRASEyQtfhSE0+m465v++3jyyXeBTBUjtVZg28/gHeV5mrTJqvEKhKroBGAvhW+qPHiQ==", + "dev": true, + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz", + "integrity": "sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.7.tgz", + "integrity": "sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.7.tgz", + "integrity": "sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.24.7.tgz", + "integrity": "sha512-N9JIYk3TD+1vq/wn77YnJOqMtfWhNewNE+DJV4puD2X7Ew9J4JvrzrFDfTfyv5EgEXVy9/Wt8QiOErzEmv5Ifw==", + "dev": true, + "dependencies": { + "@babel/helper-function-name": "^7.24.7", + 
"@babel/template": "^7.24.7", + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.7.tgz", + "integrity": "sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==", + "dev": true, + "dependencies": { + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.24.7", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.7.tgz", + "integrity": "sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.7.tgz", + "integrity": "sha512-TiT1ss81W80eQsN+722OaeQMY/G4yTb4G9JrqeiDADs3N8lbPMGldWi9x8tyqCW5NLx1Jh2AvkE6r6QvEltMMQ==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.24.7.tgz", + "integrity": "sha512-unaQgZ/iRu/By6tsjMZzpeBZjChYfLYry6HrEXPoz3KmfF0sVBQ1l8zKMQ4xRGLWVsjuvB8nQfjNP/DcfEOCsg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.7.tgz", + "integrity": "sha512-+izXIbke1T33mY4MSNnrqhPXDz01WYhEf3yF5NbnUtkiNnm+XBZJl3kNfoK6NKmYlz/D07+l2GWVK/QfDkNCuQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.24.7.tgz", + "integrity": 
"sha512-utA4HuR6F4Vvcr+o4DnjL8fCOlgRFGbeeBEGNg3ZTrLFw6VWG5XmUrvcQ0FjIYMU2ST4XcR2Wsp7t9qOAPnxMg==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-class-properties": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", + "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-class-properties instead.", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-decorators": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.24.7.tgz", + "integrity": "sha512-RL9GR0pUG5Kc8BUWLNDm2T5OpYwSX15r98I0IkgmRQTXuELq/OynH8xtMTMvTJFjXbMWFVTKtYkTaYQsuAwQlQ==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-decorators": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz", + "integrity": "sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-nullish-coalescing-operator instead.", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-numeric-separator": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz", + "integrity": "sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-numeric-separator instead.", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-optional-chaining": { + "version": "7.21.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.21.0.tgz", + "integrity": "sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-optional-chaining instead.", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.20.2", + "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-private-methods": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz", + "integrity": "sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-private-methods instead.", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "dev": true, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": 
"sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-decorators": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.24.7.tgz", + "integrity": "sha512-Ui4uLJJrRV1lb38zg1yYTmRKmiZLiftDEvZN2iq3kd9kUFU+PttmzTbAFC2ucRk/XJmtek6G23gPsuZbhrT8fQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-flow": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.24.7.tgz", + "integrity": "sha512-9G8GYT/dxn/D1IIKOUBmGX0mnmj46mGH9NnZyJLwtCpgh5f7D2VbuKodb+2s9m1Yavh1s7ASQN8lf0eqrb1LTw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.24.7.tgz", + "integrity": "sha512-Ec3NRUMoi8gskrkBe3fNmEQfxDvY8bgfQpz6jlk/41kX9eUjvpyqWU7PBP/pLAvMaSQjbMNKJmvX57jP+M6bPg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.7.tgz", + "integrity": "sha512-hbX+lKKeUMGihnK8nvKqmXBInriT3GVjzXKFriV3YC6APGxMbP8RZNFwy91+hocLXq90Mta+HshoB31802bb8A==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": 
"^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz", + "integrity": "sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.7.tgz", + "integrity": "sha512-c/+fVeJBB0FeKsFvwytYiUD+LBvhHjGSI0g446PRGdSVGZLRNArBUno2PETbAly3tpiNAQR5XaZ+JslxkotsbA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.7.tgz", + "integrity": "sha512-Dt9LQs6iEY++gXUwY03DNFat5C2NbO48jj+j/bSAz6b3HgPs39qcPiYt77fDObIcFwj3/C2ICX9YMwGflUoSHQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.24.7.tgz", + "integrity": "sha512-o+iF77e3u7ZS4AoAuJvapz9Fm001PuD2V3Lp6OSE4FYQke+cSewYtnek+THqGRWyQloRCyvWL1OkyfNEl9vr/g==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7", + "@babel/plugin-syntax-async-generators": 
"^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.7.tgz", + "integrity": "sha512-SQY01PcJfmQ+4Ash7NE+rpbLFbmqA2GPIgqzxfFTL4t1FKRq4zTms/7htKpoCUI9OcFYgzqfmCdH53s6/jn5fA==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.7.tgz", + "integrity": "sha512-yO7RAz6EsVQDaBH18IDJcMB1HnrUn2FJ/Jslc/WtPPWcjhpUJXU/rjbwmluzp7v/ZzWcEhTMXELnnsz8djWDwQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.24.7.tgz", + "integrity": "sha512-Nd5CvgMbWc+oWzBsuaMcbwjJWAcp5qzrbg69SZdHSP7AMY0AbWFqFO0WTFCA1jxhMCwodRwvRec8k0QUbZk7RQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.24.7.tgz", + "integrity": "sha512-vKbfawVYayKcSeSR5YYzzyXvsDFWU2mD8U5TFeXtbCPLFUqe7GyCgvO6XDHzje862ODrOwy6WCPmKeWHbCFJ4w==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.7.tgz", + "integrity": "sha512-HMXK3WbBPpZQufbMG4B46A90PkuuhN9vBCb5T8+VAHqvAqvcLi+2cKoukcpmUYkszLhScU3l1iudhrks3DggRQ==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.24.7.tgz", + "integrity": "sha512-CFbbBigp8ln4FU6Bpy6g7sE8B/WmCmzvivzUC6xDAdWVsjYTXijpuuGJmYkAaoWAzcItGKT3IOAbxRItZ5HTjw==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + 
"@babel/helper-replace-supers": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.7.tgz", + "integrity": "sha512-25cS7v+707Gu6Ds2oY6tCkUwsJ9YIDbggd9+cu9jzzDgiNq7hR/8dkzxWfKWnTic26vsI3EsCXNd4iEB6e8esQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/template": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.7.tgz", + "integrity": "sha512-19eJO/8kdCQ9zISOf+SEUJM/bAUIsvY3YDnXZTupUCQ8LgrWnsG/gFB9dvXqdXnRXMAM8fvt7b0CBKQHNGy1mw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.7.tgz", + "integrity": "sha512-ZOA3W+1RRTSWvyqcMJDLqbchh7U4NRGqwRfFSVbOLS/ePIP4vHB5e8T8eXcuqyN1QkgKyj5wuW0lcS85v4CrSw==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.7.tgz", + "integrity": "sha512-JdYfXyCRihAe46jUIliuL2/s0x0wObgwwiGxw/UbgJBr20gQBThrokO4nYKgWkD7uBaqM7+9x5TU7NkExZJyzw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.7.tgz", + "integrity": "sha512-sc3X26PhZQDb3JhORmakcbvkeInvxz+A8oda99lj7J60QRuPZvNAk9wQlTBS1ZynelDrDmTU4pw1tyc5d5ZMUg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.7.tgz", + "integrity": "sha512-Rqe/vSc9OYgDajNIK35u7ot+KeCoetqQYFXM4Epf7M7ez3lWlOjrDjrwMei6caCVhfdw+mIKD4cgdGNy5JQotQ==", + "dev": true, + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.7.tgz", + "integrity": "sha512-v0K9uNYsPL3oXZ/7F9NNIbAj2jv1whUEtyA6aujhekLs56R++JDQuzRcP2/z4WX5Vg/c5lE9uWZA0/iUoFhLTA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-flow-strip-types": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.24.7.tgz", + "integrity": "sha512-cjRKJ7FobOH2eakx7Ja+KpJRj8+y+/SiB3ooYm/n2UJfxu0oEaOoxOinitkJcPqv9KxS0kxTGPUaR7L2XcXDXA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-flow": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.7.tgz", + "integrity": "sha512-wo9ogrDG1ITTTBsy46oGiN1dS9A7MROBTcYsfS8DtsImMkHk9JXJ3EWQM6X2SUw4x80uGPlwj0o00Uoc6nEE3g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.24.7.tgz", + "integrity": "sha512-U9FcnA821YoILngSmYkW6FjyQe2TyZD5pHt4EVIhmcTkrJw/3KqcrRSxuOo5tFZJi7TE19iDyI1u+weTI7bn2w==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.7.tgz", + "integrity": "sha512-2yFnBGDvRuxAaE/f0vfBKvtnvvqU8tGpMHqMNpTN2oWMKIR3NqFkjaAgGwawhqK/pIN2T3XdjGPdaG0vDhOBGw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.24.7.tgz", + "integrity": "sha512-vcwCbb4HDH+hWi8Pqenwnjy+UiklO4Kt1vfspcQYFhJdpthSnW8XvWGyDZWKNVrVbVViI/S7K9PDJZiUmP2fYQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.7.tgz", + "integrity": "sha512-4D2tpwlQ1odXmTEIFWy9ELJcZHqrStlzK/dAOWYyxX3zT0iXQB6banjgeOJQXzEc4S0E0a5A+hahxPaEFYftsw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.7.tgz", + "integrity": "sha512-T/hRC1uqrzXMKLQ6UCwMT85S3EvqaBXDGf0FaMf4446Qx9vKwlghvee0+uuZcDUCZU5RuNi4781UQ7R308zzBw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.7.tgz", + "integrity": "sha512-9+pB1qxV3vs/8Hdmz/CulFB8w2tuu6EB94JZFsjdqxQokwGa9Unap7Bo2gGBGIvPmDIVvQrom7r5m/TCDMURhg==", + "dev": true, + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.7.tgz", + "integrity": "sha512-iFI8GDxtevHJ/Z22J5xQpVqFLlMNstcLXh994xifFwxxGslr2ZXXLWgtBeLctOD63UFDArdvN6Tg8RFw+aEmjQ==", + "dev": true, + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.7.tgz", + "integrity": "sha512-GYQE0tW7YoaN13qFh3O1NCY4MPkUiAH3fiF7UcV/I3ajmDKEdG3l+UOcbAm4zUE3gnvUU+Eni7XrVKo9eO9auw==", + "dev": true, + "dependencies": { + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.7.tgz", + "integrity": "sha512-3aytQvqJ/h9z4g8AsKPLvD4Zqi2qT+L3j7XoFFu1XBlZWEl2/1kWnhmAbxpLgPrHSY0M6UA02jyTiwUVtiKR6A==", + "dev": true, + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.24.7.tgz", + "integrity": "sha512-/jr7h/EWeJtk1U/uz2jlsCioHkZk1JJZVcc8oQsJ1dUlaJD83f4/6Zeh2aHt9BIFokHIsSeDfhUmju0+1GPd6g==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.7.tgz", + "integrity": "sha512-RNKwfRIXg4Ls/8mMTza5oPF5RkOW8Wy/WgMAp1/F1yZ8mMbtwXW+HDoJiOsagWrAhI5f57Vncrmr9XeT4CVapA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.7.tgz", + "integrity": "sha512-Ts7xQVk1OEocqzm8rHMXHlxvsfZ0cEF2yomUqpKENHWMF4zKk175Y4q8H5knJes6PgYad50uuRmt3UJuhBw8pQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.7.tgz", + "integrity": "sha512-e6q1TiVUzvH9KRvicuxdBTUj4AdKSRwzIyFFnfnezpCfP2/7Qmbb8qbU2j7GODbl4JMkblitCQjKYUaX/qkkwA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.7.tgz", + "integrity": "sha512-4QrHAr0aXQCEFni2q4DqKLD31n2DL+RxcwnNjDFkSG0eNQ/xCavnRkfCUjsyqGC2OviNJvZOF/mQqZBw7i2C5Q==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.7.tgz", + "integrity": "sha512-A/vVLwN6lBrMFmMDmPPz0jnE6ZGx7Jq7d6sT/Ev4H65RER6pZ+kczlf1DthF5N0qaPHBsI7UXiE8Zy66nmAovg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.7.tgz", + "integrity": "sha512-uLEndKqP5BfBbC/5jTwPxLh9kqPWWgzN/f8w6UwAIirAEqiIVJWWY312X72Eub09g5KF9+Zn7+hT7sDxmhRuKA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.7.tgz", + "integrity": "sha512-tK+0N9yd4j+x/4hxF3F0e0fu/VdcxU18y5SevtyM/PCFlQvXbR0Zmlo2eBrKtVipGNFzpq56o8WsIIKcJFUCRQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.7.tgz", + "integrity": "sha512-yGWW5Rr+sQOhK0Ot8hjDJuxU3XLRQGflvT4lhlSY0DFvdb3TwKaY26CJzHtYllU0vT9j58hc37ndFPsqT1SrzA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.24.7.tgz", + "integrity": "sha512-COTCOkG2hn4JKGEKBADkA8WNb35TGkkRbI5iT845dB+NyqgO8Hn+ajPbSnIQznneJTa3d30scb6iz/DhH8GsJQ==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.7.tgz", + "integrity": "sha512-9z76mxwnwFxMyxZWEgdgECQglF2Q7cFLm0kMf8pGwt+GSJsY0cONKj/UuO4bOH0w/uAel3ekS4ra5CEAyJRmDA==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.7.tgz", + "integrity": "sha512-EMi4MLQSHfd2nrCqQEWxFdha2gBCqU4ZcCng4WBGZ5CJL4bBRW0ptdqqDdeirGZcpALazVVNJqRmsO8/+oNCBA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.24.7.tgz", + "integrity": "sha512-H/Snz9PFxKsS1JLI4dJLtnJgCJRoo0AUm3chP6NYr+9En1JMKloheEiLIhlp5MDVznWo+H3AAC1Mc8lmUEpsgg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.24.7.tgz", + "integrity": "sha512-+Dj06GDZEFRYvclU6k4bme55GKBEWUmByM/eoKuqg4zTNQHiApWRhQph5fxQB2wAEFvRzL1tOEj1RJ19wJrhoA==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.24.7.tgz", + "integrity": "sha512-QG9EnzoGn+Qar7rxuW+ZOsbWOt56FvvI93xInqsZDC5fsekx1AlIO4KIJ5M+D0p0SqSH156EpmZyXq630B8OlQ==", + "dev": true, + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.24.7.tgz", + "integrity": "sha512-fOPQYbGSgH0HUp4UJO4sMBFjY6DuWq+2i8rixyUMb3CdGixs/gccURvYOAhajBdKDoGajFr3mUq5rH3phtkGzw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.24.7.tgz", + "integrity": "sha512-J2z+MWzZHVOemyLweMqngXrgGC42jQ//R0KdxqkIz/OrbVIIlhFI3WigZ5fO+nwFvBlncr4MGapd8vTyc7RPNQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.24.7.tgz", + "integrity": "sha512-PLgBVk3fzbmEjBJ/u8kFzOqS9tUeDjiaWud/rRym/yjCo/M9cASPlnrd2ZmmZpQT40fOOrvR8jh+n8jikrOhNA==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.7.tgz", + "integrity": "sha512-lq3fvXPdimDrlg6LWBoqj+r/DEWgONuwjuOuQCSYgRroXDH/IdM1C0IZf59fL5cHLpjEH/O6opIRBbqv7ELnuA==", + "dev": true, + "dependencies": 
{ + "@babel/helper-plugin-utils": "^7.24.7", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.7.tgz", + "integrity": "sha512-0DUq0pHcPKbjFZCfTss/pGkYMfy3vFWydkUBd9r0GHpIyfs2eCDENvqadMycRS9wZCXR41wucAfJHJmwA0UmoQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.7.tgz", + "integrity": "sha512-YqXjrk4C+a1kZjewqt+Mmu2UuV1s07y8kqcUf4qYLnoqemhR4gRQikhdAhSVJioMjVTu6Mo6pAbaypEA3jY6fw==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.1", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.7.tgz", + "integrity": "sha512-KsDsevZMDsigzbA09+vacnLpmPH4aWjcZjXdyFKGzpplxhbeB4wYtury3vglQkg6KM/xEPKt73eCjPPf1PgXBA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.7.tgz", + "integrity": "sha512-x96oO0I09dgMDxJaANcRyD4ellXFLLiWhuwDxKZX5g2rWP1bTPkBSwCYv96VDXVT1bD9aPj8tppr5ITIh8hBng==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.7.tgz", + "integrity": "sha512-kHPSIJc9v24zEml5geKg9Mjx5ULpfncj0wRpYtxbvKyTtHCYDkVE3aHQ03FrpEo4gEe2vrJJS1Y9CJTaThA52g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.7.tgz", + "integrity": "sha512-AfDTQmClklHCOLxtGoP7HkeMw56k1/bTQjwsfhL6pppo/M4TOBSq+jjBUBLmV/4oeFg4GWMavIl44ZeCtmmZTw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": 
"7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.7.tgz", + "integrity": "sha512-VtR8hDy7YLB7+Pet9IarXjg/zgCMSF+1mNS/EQEiEaUPoFXCVsHG64SIxcaaI2zJgRiv+YmgaQESUfWAdbjzgg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.7.tgz", + "integrity": "sha512-iLD3UNkgx2n/HrjBesVbYX6j0yqn/sJktvbtKKgcaLIQ4bTTQ8obAypc1VpyHPD2y4Phh9zHOaAt8e/L14wCpw==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.7.tgz", + "integrity": "sha512-U3ap1gm5+4edc2Q/P+9VrBNhGkfnf+8ZqppY71Bo/pzZmXhhLdqgaUl6cuB07O1+AQJtCLfaOmswiNbSQ9ivhw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.7.tgz", + "integrity": "sha512-uH2O4OV5M9FZYQrwc7NdVmMxQJOCCzFeYudlZSzUAHRFeOujQefa92E74TQDVskNHCzOXoigEuoyzHDhaEaK5w==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.7.tgz", + "integrity": "sha512-hlQ96MBZSAXUq7ltkjtu3FJCCSMx/j629ns3hA3pXnBXjanNP0LHi+JpPeA81zaWgVK1VGH95Xuy7u0RyQ8kMg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.24.7.tgz", + "integrity": "sha512-2G8aAvF4wy1w/AGZkemprdGMRg5o6zPNhbHVImRz3lss55TYCBd6xStN19rt8XJHq20sqV0JbyWjOWwQRwV/wg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.24.7.tgz", + "integrity": 
"sha512-1YZNsc+y6cTvWlDHidMBsQZrZfEFjRIo/BZCT906PMdzOyXtSLTgqGdrpcuTDCXyd11Am5uQULtDIcCfnTc8fQ==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.24.7", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.24.7", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.24.7", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.24.7", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.24.7", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.24.7", + "@babel/plugin-transform-async-generator-functions": "^7.24.7", + "@babel/plugin-transform-async-to-generator": "^7.24.7", + "@babel/plugin-transform-block-scoped-functions": "^7.24.7", + "@babel/plugin-transform-block-scoping": "^7.24.7", + "@babel/plugin-transform-class-properties": "^7.24.7", + "@babel/plugin-transform-class-static-block": "^7.24.7", + "@babel/plugin-transform-classes": "^7.24.7", + "@babel/plugin-transform-computed-properties": "^7.24.7", + "@babel/plugin-transform-destructuring": "^7.24.7", + "@babel/plugin-transform-dotall-regex": "^7.24.7", + "@babel/plugin-transform-duplicate-keys": "^7.24.7", + "@babel/plugin-transform-dynamic-import": "^7.24.7", + "@babel/plugin-transform-exponentiation-operator": "^7.24.7", + "@babel/plugin-transform-export-namespace-from": "^7.24.7", + "@babel/plugin-transform-for-of": "^7.24.7", + "@babel/plugin-transform-function-name": "^7.24.7", + "@babel/plugin-transform-json-strings": "^7.24.7", + "@babel/plugin-transform-literals": "^7.24.7", + "@babel/plugin-transform-logical-assignment-operators": "^7.24.7", + "@babel/plugin-transform-member-expression-literals": "^7.24.7", + "@babel/plugin-transform-modules-amd": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-modules-systemjs": "^7.24.7", + "@babel/plugin-transform-modules-umd": "^7.24.7", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.24.7", + "@babel/plugin-transform-new-target": "^7.24.7", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.24.7", + "@babel/plugin-transform-numeric-separator": "^7.24.7", + "@babel/plugin-transform-object-rest-spread": "^7.24.7", + "@babel/plugin-transform-object-super": "^7.24.7", + 
"@babel/plugin-transform-optional-catch-binding": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7", + "@babel/plugin-transform-parameters": "^7.24.7", + "@babel/plugin-transform-private-methods": "^7.24.7", + "@babel/plugin-transform-private-property-in-object": "^7.24.7", + "@babel/plugin-transform-property-literals": "^7.24.7", + "@babel/plugin-transform-regenerator": "^7.24.7", + "@babel/plugin-transform-reserved-words": "^7.24.7", + "@babel/plugin-transform-shorthand-properties": "^7.24.7", + "@babel/plugin-transform-spread": "^7.24.7", + "@babel/plugin-transform-sticky-regex": "^7.24.7", + "@babel/plugin-transform-template-literals": "^7.24.7", + "@babel/plugin-transform-typeof-symbol": "^7.24.7", + "@babel/plugin-transform-unicode-escapes": "^7.24.7", + "@babel/plugin-transform-unicode-property-regex": "^7.24.7", + "@babel/plugin-transform-unicode-regex": "^7.24.7", + "@babel/plugin-transform-unicode-sets-regex": "^7.24.7", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.4", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.24.7.tgz", + "integrity": "sha512-AAH4lEkpmzFWrGVlHaxJB7RLH21uPQ9+He+eFLWHmF9IuFQVugz8eAsamaW0DXRrTfco5zj1wWtpdcXJUOfsag==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-transform-react-display-name": "^7.24.7", + "@babel/plugin-transform-react-jsx": "^7.24.7", + "@babel/plugin-transform-react-jsx-development": "^7.24.7", + "@babel/plugin-transform-react-pure-annotations": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.24.7.tgz", + "integrity": "sha512-SyXRe3OdWwIwalxDg5UtJnJQO+YPcTfwiIY2B0Xlddh9o7jpWLvv8X1RthIeDOxQ+O1ML5BLPCONToObyVQVuQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==", + "dev": true + }, + "node_modules/@babel/runtime": { + "version": 
"7.24.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.7.tgz", + "integrity": "sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.7.tgz", + "integrity": "sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.7.tgz", + "integrity": "sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.7.tgz", + "integrity": "sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==", + "dependencies": { + "@babel/helper-string-parser": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@buerokratt-ria/styles": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/@buerokratt-ria/styles/-/styles-0.0.1.tgz", + "integrity": "sha512-bSj7WsdQO4P/43mRgsa5sDEwBuOebXcl3+Peur8NwToqczqsTMbXSO5P6xyXHoTnHWt082PhT8ht7OAgtFSzfw==" + }, + "node_modules/@emotion/babel-plugin": { + "version": "11.11.0", + "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.11.0.tgz", + "integrity": "sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ==", + "dependencies": { + "@babel/helper-module-imports": "^7.16.7", + "@babel/runtime": "^7.18.3", + "@emotion/hash": "^0.9.1", + "@emotion/memoize": "^0.8.1", + "@emotion/serialize": "^1.1.2", + "babel-plugin-macros": "^3.1.0", + "convert-source-map": "^1.5.0", + "escape-string-regexp": "^4.0.0", + "find-root": "^1.1.0", + "source-map": "^0.5.7", + "stylis": "4.2.0" + } + }, + "node_modules/@emotion/babel-plugin/node_modules/@emotion/memoize": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.8.1.tgz", + "integrity": "sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==" + }, + "node_modules/@emotion/babel-plugin/node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==" + }, + "node_modules/@emotion/babel-plugin/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@emotion/cache": { + "version": "11.11.0", + "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.11.0.tgz", + "integrity": "sha512-P34z9ssTCBi3e9EI1ZsWpNHcfY1r09ZO0rZbRO2ob3ZQMnFI35jB536qoXbkdesr5EUhYi22anuEJuyxifaqAQ==", + "dependencies": { + "@emotion/memoize": "^0.8.1", + "@emotion/sheet": "^1.2.2", + "@emotion/utils": "^1.2.1", + "@emotion/weak-memoize": "^0.3.1", + "stylis": "4.2.0" + } + }, + "node_modules/@emotion/cache/node_modules/@emotion/memoize": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.8.1.tgz", + "integrity": "sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==" + }, + "node_modules/@emotion/hash": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.1.tgz", + "integrity": "sha512-gJB6HLm5rYwSLI6PQa+X1t5CFGrv1J1TWG+sOyMCeKz2ojaj6Fnl/rZEspogG+cvqbt4AE/2eIyD2QfLKTBNlQ==" + }, + "node_modules/@emotion/is-prop-valid": { + "version": "0.8.8", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-0.8.8.tgz", + "integrity": "sha512-u5WtneEAr5IDG2Wv65yhunPSMLIpuKsbuOktRojfrEiEvRyC85LgPMZI63cr7NUqT8ZIGdSVg8ZKGxIug4lXcA==", + "optional": true, + "dependencies": { + "@emotion/memoize": "0.7.4" + } + }, + "node_modules/@emotion/memoize": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.7.4.tgz", + "integrity": "sha512-Ja/Vfqe3HpuzRsG1oBtWTHk2PGZ7GR+2Vz5iYGelAw8dx32K0y7PjVuxK6z1nMpZOqAFsRUPCkK1YjJ56qJlgw==", + "optional": true + }, + "node_modules/@emotion/react": { + "version": "11.11.4", + "resolved": "https://registry.npmjs.org/@emotion/react/-/react-11.11.4.tgz", + "integrity": "sha512-t8AjMlF0gHpvvxk5mAtCqR4vmxiGHCeJBaQO6gncUSdklELOgtwjerNY2yuJNfwnc6vi16U/+uMF+afIawJ9iw==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@emotion/babel-plugin": "^11.11.0", + "@emotion/cache": "^11.11.0", + "@emotion/serialize": "^1.1.3", + "@emotion/use-insertion-effect-with-fallbacks": "^1.0.1", + "@emotion/utils": "^1.2.1", + "@emotion/weak-memoize": "^0.3.1", + "hoist-non-react-statics": "^3.3.1" + }, + "peerDependencies": { + "react": ">=16.8.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@emotion/serialize": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.1.4.tgz", + "integrity": "sha512-RIN04MBT8g+FnDwgvIUi8czvr1LU1alUMI05LekWB5DGyTm8cCBMCRpq3GqaiyEDRptEXOyXnvZ58GZYu4kBxQ==", + "dependencies": { + "@emotion/hash": "^0.9.1", + "@emotion/memoize": "^0.8.1", + "@emotion/unitless": "^0.8.1", + "@emotion/utils": "^1.2.1", + "csstype": "^3.0.2" + } + }, + "node_modules/@emotion/serialize/node_modules/@emotion/memoize": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.8.1.tgz", + "integrity": "sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==" + }, + "node_modules/@emotion/sheet": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.2.2.tgz", + "integrity": 
"sha512-0QBtGvaqtWi+nx6doRwDdBIzhNdZrXUppvTM4dtZZWEGTXL/XE/yJxLMGlDT1Gt+UHH5IX1n+jkXyytE/av7OA==" + }, + "node_modules/@emotion/unitless": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.8.1.tgz", + "integrity": "sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==" + }, + "node_modules/@emotion/use-insertion-effect-with-fallbacks": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.1.tgz", + "integrity": "sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw==", + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/@emotion/utils": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.2.1.tgz", + "integrity": "sha512-Y2tGf3I+XVnajdItskUCn6LX+VUDmP6lTL4fcqsXAv43dnlbZiuW4MWQW38rW/BVWSE7Q/7+XQocmpnRYILUmg==" + }, + "node_modules/@emotion/weak-memoize": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.3.1.tgz", + "integrity": "sha512-EsBwpc7hBUJWAsNPBmJy4hxWx12v6bshQsldrVmjxJoc3isbxhOrF2IcCpaXxfvq03NwkI7sbsOLXbYuqF/8Ww==" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz", + "integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz", + "integrity": "sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz", + "integrity": "sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz", + "integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", + "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz", + "integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + 
"node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz", + "integrity": "sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz", + "integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz", + "integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz", + "integrity": "sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz", + "integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz", + "integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==", + "cpu": [ + "loong64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz", + "integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==", + "cpu": [ + "mips64el" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz", + "integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz", + "integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.19.12", + 
"resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz", + "integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz", + "integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz", + "integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz", + "integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz", + "integrity": "sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz", + "integrity": "sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz", + "integrity": "sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz", + "integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.7.0.tgz", + "integrity": "sha512-dyybb3AcajC7uha6CvhdVRJqaKyn7w2YKqKyAN37NKYgZT36w+iRb0Dymmc5qEJ549c/S31cMMSFd75bteCpCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + 
"node_modules/@eslint-community/regexpp": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.1.tgz", + "integrity": "sha512-Zm2NGpWELsQAD1xsJzGQpYfvICSsFkEpU0jxBjfdC6uNEWXcHnfs9hScFWtXVDVl+rBQJGrl4g1vcKIejpH9dA==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/eslintrc/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.2.tgz", + "integrity": "sha512-+2XpQV9LLZeanU4ZevzRnGFg2neDeKHgFLjP6YLW+tly0IvrhqT4u8enLGjLH3qeh85g19xY5rsAusfwTdn5lg==", + "dependencies": { + "@floating-ui/utils": "^0.2.0" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.6.5", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.5.tgz", + "integrity": "sha512-Nsdud2X65Dz+1RHjAIP0t8z5e2ff/IRbei6BqFrl1urT8sDVzM1HMQ+R0XcU5ceRfyO3I6ayeqIfh+6Wb8LGTw==", + "dependencies": { + "@floating-ui/core": "^1.0.0", + "@floating-ui/utils": "^0.2.0" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.0.tgz", + "integrity": "sha512-lNzj5EQmEKn5FFKc04+zasr09h/uX8RtJRNj5gUXsSQIXHVWTVh+hVAg1vOMCexkX8EgvemMvIFpQfkosnVNyA==", + "dependencies": { + "@floating-ui/dom": "^1.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.2.tgz", + "integrity": "sha512-J4yDIIthosAsRZ5CPYP/jQvUAQtlZTTD/4suA08/FEnlxqW3sKS9iAhgsa9VYLZ6vDHn/ixJgIqRQPotoBjxIw==" + }, + 
"node_modules/@fontsource/roboto": { + "version": "4.5.8", + "resolved": "https://registry.npmjs.org/@fontsource/roboto/-/roboto-4.5.8.tgz", + "integrity": "sha512-CnD7zLItIzt86q4Sj3kZUiLcBk1dSk81qcqgMGaZe7SQ1P8hFNxhMl5AZthK1zrDM5m74VVhaOpuMGIL4gagaA==" + }, + "node_modules/@formkit/auto-animate": { + "version": "1.0.0-pre-alpha.3", + "resolved": "https://registry.npmjs.org/@formkit/auto-animate/-/auto-animate-1.0.0-pre-alpha.3.tgz", + "integrity": "sha512-lMVZ3LFUIu0RIxCEwmV8nUUJQ46M2bv2NDU3hrhZivViuR1EheC8Mj5sx/ACqK5QLK8XB8z7GDIZBUGdU/9OZQ==", + "peerDependencies": { + "react": "^16.8.0", + "vue": "^3.0.0" + }, + "peerDependenciesMeta": { + "react": { + "optional": true + }, + "vue": { + "optional": true + } + } + }, + "node_modules/@fortaine/fetch-event-source": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@fortaine/fetch-event-source/-/fetch-event-source-3.0.6.tgz", + "integrity": "sha512-621GAuLMvKtyZQ3IA6nlDWhV1V/7PGOTNIGLUifxt0KzM+dZIweJ6F3XvQF3QnqeNfS1N7WQ0Kil1Di/lhChEw==", + "engines": { + "node": ">=16.15" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@icons/material": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/@icons/material/-/material-0.2.4.tgz", + "integrity": "sha512-QPcGmICAPbGLGb6F/yNf/KzKqvFx8z5qx3D1yFqVAjoFmXK35EgyW+cJ57Te3CNsmzblwtzakLGFqHPqrfb4Tw==", + "peerDependencies": { + "react": "*" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": 
"1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@lezer/common": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz", + "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==" + }, + "node_modules/@lezer/lr": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.1.tgz", + "integrity": "sha512-CHsKq8DMKBf9b3yXPDIU4DbH+ZJd/sJdYOW2llbW/HudP5u0VS6Bfq1hLYfgU7uAYGFIyGGQIsSOXGPEErZiJw==", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@lmdb/lmdb-darwin-arm64": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/@lmdb/lmdb-darwin-arm64/-/lmdb-darwin-arm64-2.8.5.tgz", + "integrity": "sha512-KPDeVScZgA1oq0CiPBcOa3kHIqU+pTOwRFDIhxvmf8CTNvqdZQYp5cCKW0bUk69VygB2PuTiINFWbY78aR2pQw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@lmdb/lmdb-darwin-x64": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/@lmdb/lmdb-darwin-x64/-/lmdb-darwin-x64-2.8.5.tgz", + "integrity": "sha512-w/sLhN4T7MW1nB3R/U8WK5BgQLz904wh+/SmA2jD8NnF7BLLoUgflCNxOeSPOWp8geP6nP/+VjWzZVip7rZ1ug==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@lmdb/lmdb-linux-arm": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/@lmdb/lmdb-linux-arm/-/lmdb-linux-arm-2.8.5.tgz", + "integrity": "sha512-c0TGMbm2M55pwTDIfkDLB6BpIsgxV4PjYck2HiOX+cy/JWiBXz32lYbarPqejKs9Flm7YVAKSILUducU9g2RVg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@lmdb/lmdb-linux-arm64": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/@lmdb/lmdb-linux-arm64/-/lmdb-linux-arm64-2.8.5.tgz", + "integrity": "sha512-vtbZRHH5UDlL01TT5jB576Zox3+hdyogvpcbvVJlmU5PdL3c5V7cj1EODdh1CHPksRl+cws/58ugEHi8bcj4Ww==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@lmdb/lmdb-linux-x64": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/@lmdb/lmdb-linux-x64/-/lmdb-linux-x64-2.8.5.tgz", + "integrity": "sha512-Xkc8IUx9aEhP0zvgeKy7IQ3ReX2N8N1L0WPcQwnZweWmOuKfwpS3GRIYqLtK5za/w3E60zhFfNdS+3pBZPytqQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@lmdb/lmdb-win32-x64": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/@lmdb/lmdb-win32-x64/-/lmdb-win32-x64-2.8.5.tgz", + "integrity": "sha512-4wvrf5BgnR8RpogHhtpCPJMKBmvyZPhhUtEwMJbXh0ni2BucpfF07jlmyM11zRqQ2XIq6PbC2j7W7UCCcm1rRQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, 
+ "node_modules/@mischnic/json-sourcemap": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@mischnic/json-sourcemap/-/json-sourcemap-0.1.1.tgz", + "integrity": "sha512-iA7+tyVqfrATAIsIRWQG+a7ZLLD0VaOCKV2Wd/v4mqIU3J9c4jx9p7S0nw1XH3gJCKNBOOwACOPYYSUu9pgT+w==", + "dependencies": { + "@lezer/common": "^1.0.0", + "@lezer/lr": "^1.0.0", + "json5": "^2.2.1" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/@motionone/animation": { + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.18.0.tgz", + "integrity": "sha512-9z2p5GFGCm0gBsZbi8rVMOAJCtw1WqBTIPw3ozk06gDvZInBPIsQcHgYogEJ4yuHJ+akuW8g1SEIOpTOvYs8hw==", + "dependencies": { + "@motionone/easing": "^10.18.0", + "@motionone/types": "^10.17.1", + "@motionone/utils": "^10.18.0", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/dom": { + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/dom/-/dom-10.18.0.tgz", + "integrity": "sha512-bKLP7E0eyO4B2UaHBBN55tnppwRnaE3KFfh3Ps9HhnAkar3Cb69kUCJY9as8LrccVYKgHA+JY5dOQqJLOPhF5A==", + "dependencies": { + "@motionone/animation": "^10.18.0", + "@motionone/generators": "^10.18.0", + "@motionone/types": "^10.17.1", + "@motionone/utils": "^10.18.0", + "hey-listen": "^1.0.8", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/easing": { + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.18.0.tgz", + "integrity": "sha512-VcjByo7XpdLS4o9T8t99JtgxkdMcNWD3yHU/n6CLEz3bkmKDRZyYQ/wmSf6daum8ZXqfUAgFeCZSpJZIMxaCzg==", + "dependencies": { + "@motionone/utils": "^10.18.0", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/generators": { + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.18.0.tgz", + "integrity": "sha512-+qfkC2DtkDj4tHPu+AFKVfR/C30O1vYdvsGYaR13W/1cczPrrcjdvYCj0VLFuRMN+lP1xvpNZHCRNM4fBzn1jg==", + "dependencies": { + "@motionone/types": "^10.17.1", + "@motionone/utils": "^10.18.0", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/types": { + "version": "10.17.1", + "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.17.1.tgz", + "integrity": "sha512-KaC4kgiODDz8hswCrS0btrVrzyU2CSQKO7Ps90ibBVSQmjkrt2teqta6/sOG59v7+dPnKMAg13jyqtMKV2yJ7A==" + }, + "node_modules/@motionone/utils": { + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.18.0.tgz", + "integrity": "sha512-3XVF7sgyTSI2KWvTf6uLlBJ5iAgRgmvp3bpuOiQJvInd4nZ19ET8lX5unn30SlmRH7hXbBbH+Gxd0m0klJ3Xtw==", + "dependencies": { + "@motionone/types": "^10.17.1", + "hey-listen": "^1.0.8", + "tslib": "^2.3.1" + } + }, + "node_modules/@msgpackr-extract/msgpackr-extract-darwin-arm64": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-darwin-arm64/-/msgpackr-extract-darwin-arm64-3.0.3.tgz", + "integrity": "sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@msgpackr-extract/msgpackr-extract-darwin-x64": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-darwin-x64/-/msgpackr-extract-darwin-x64-3.0.3.tgz", + "integrity": "sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@msgpackr-extract/msgpackr-extract-linux-arm": { + 
"version": "3.0.3", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-arm/-/msgpackr-extract-linux-arm-3.0.3.tgz", + "integrity": "sha512-fg0uy/dG/nZEXfYilKoRe7yALaNmHoYeIoJuJ7KJ+YyU2bvY8vPv27f7UKhGRpY6euFYqEVhxCFZgAUNQBM3nw==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@msgpackr-extract/msgpackr-extract-linux-arm64": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-arm64/-/msgpackr-extract-linux-arm64-3.0.3.tgz", + "integrity": "sha512-YxQL+ax0XqBJDZiKimS2XQaf+2wDGVa1enVRGzEvLLVFeqa5kx2bWbtcSXgsxjQB7nRqqIGFIcLteF/sHeVtQg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@msgpackr-extract/msgpackr-extract-linux-x64": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-x64/-/msgpackr-extract-linux-x64-3.0.3.tgz", + "integrity": "sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@msgpackr-extract/msgpackr-extract-win32-x64": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-win32-x64/-/msgpackr-extract-win32-x64-3.0.3.tgz", + "integrity": "sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@mswjs/cookies": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@mswjs/cookies/-/cookies-0.2.2.tgz", + "integrity": "sha512-mlN83YSrcFgk7Dm1Mys40DLssI1KdJji2CMKN8eOlBqsTADYzj2+jWzsANsUTFbxDMWPD5e9bfA1RGqBpS3O1g==", + "dev": true, + "dependencies": { + "@types/set-cookie-parser": "^2.4.0", + "set-cookie-parser": "^2.4.6" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@mswjs/interceptors": { + "version": "0.17.10", + "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.17.10.tgz", + "integrity": "sha512-N8x7eSLGcmUFNWZRxT1vsHvypzIRgQYdG0rJey/rZCy6zT/30qDt8Joj7FxzGNLSwXbeZqJOMqDurp7ra4hgbw==", + "dev": true, + "dependencies": { + "@open-draft/until": "^1.0.3", + "@types/debug": "^4.1.7", + "@xmldom/xmldom": "^0.8.3", + "debug": "^4.3.3", + "headers-polyfill": "3.2.5", + "outvariant": "^1.2.1", + "strict-event-emitter": "^0.2.4", + "web-encoding": "^1.1.5" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@mswjs/interceptors/node_modules/headers-polyfill": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-3.2.5.tgz", + "integrity": "sha512-tUCGvt191vNSQgttSyJoibR+VO+I6+iCHIUdhzEMJKE+EAL8BwCN7fUOZlY4ofOelNHsK+gEjxB/B+9N3EWtdA==", + "dev": true + }, + "node_modules/@mswjs/interceptors/node_modules/strict-event-emitter": { + "version": "0.2.8", + "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.2.8.tgz", + "integrity": "sha512-KDf/ujU8Zud3YaLtMCcTI4xkZlZVIYxTLr+XIULexP+77EEVWixeXroLUXQXiVtH4XH2W7jr/3PT1v3zBuvc3A==", + "dev": true, + "dependencies": { + "events": "^3.3.0" + } + }, + "node_modules/@nicolo-ribaudo/eslint-scope-5-internals": { + "version": "5.1.1-v1", + "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/eslint-scope-5-internals/-/eslint-scope-5-internals-5.1.1-v1.tgz", + "integrity": "sha512-54/JRvkLIzzDWshCWfuhadfrfZVPiElY8Fcgmg1HroEly/EDSszzhBAsarCux+D/kOslTRquNzuyGSmUSTTHGg==", + 
"dev": true, + "dependencies": { + "eslint-scope": "5.1.1" + } + }, + "node_modules/@nicolo-ribaudo/eslint-scope-5-internals/node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@nicolo-ribaudo/eslint-scope-5-internals/node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@open-draft/until": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@open-draft/until/-/until-1.0.3.tgz", + "integrity": "sha512-Aq58f5HiWdyDlFffbbSjAlv596h/cOnt2DO1w3DOC7OJ5EHs0hd/nycJfiu9RJbT6Yk6F1knnRRXNSpxoIVZ9Q==", + "dev": true + }, + "node_modules/@parcel/bundler-default": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/bundler-default/-/bundler-default-2.12.0.tgz", + "integrity": "sha512-3ybN74oYNMKyjD6V20c9Gerdbh7teeNvVMwIoHIQMzuIFT6IGX53PyOLlOKRLbjxMc0TMimQQxIt2eQqxR5LsA==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/graph": "3.2.0", + "@parcel/plugin": "2.12.0", + "@parcel/rust": "2.12.0", + "@parcel/utils": "2.12.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/cache": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/cache/-/cache-2.12.0.tgz", + "integrity": "sha512-FX5ZpTEkxvq/yvWklRHDESVRz+c7sLTXgFuzz6uEnBcXV38j6dMSikflNpHA6q/L4GKkCqRywm9R6XQwhwIMyw==", + "dependencies": { + "@parcel/fs": "2.12.0", + "@parcel/logger": "2.12.0", + "@parcel/utils": "2.12.0", + "lmdb": "2.8.5" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "peerDependencies": { + "@parcel/core": "^2.12.0" + } + }, + "node_modules/@parcel/codeframe": { + "version": "2.12.0", + "resolved": 
"https://registry.npmjs.org/@parcel/codeframe/-/codeframe-2.12.0.tgz", + "integrity": "sha512-v2VmneILFiHZJTxPiR7GEF1wey1/IXPdZMcUlNXBiPZyWDfcuNgGGVQkx/xW561rULLIvDPharOMdxz5oHOKQg==", + "dependencies": { + "chalk": "^4.1.0" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/codeframe/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@parcel/codeframe/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@parcel/codeframe/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@parcel/codeframe/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/@parcel/codeframe/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@parcel/codeframe/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@parcel/compressor-raw": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/compressor-raw/-/compressor-raw-2.12.0.tgz", + "integrity": "sha512-h41Q3X7ZAQ9wbQ2csP8QGrwepasLZdXiuEdpUryDce6rF9ZiHoJ97MRpdLxOhOPyASTw/xDgE1xyaPQr0Q3f5A==", + "dependencies": { + "@parcel/plugin": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/config-default": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/config-default/-/config-default-2.12.0.tgz", + "integrity": "sha512-dPNe2n9eEsKRc1soWIY0yToMUPirPIa2QhxcCB3Z5RjpDGIXm0pds+BaiqY6uGLEEzsjhRO0ujd4v2Rmm0vuFg==", + "dependencies": { + "@parcel/bundler-default": "2.12.0", + "@parcel/compressor-raw": "2.12.0", + "@parcel/namer-default": "2.12.0", + "@parcel/optimizer-css": 
"2.12.0", + "@parcel/optimizer-htmlnano": "2.12.0", + "@parcel/optimizer-image": "2.12.0", + "@parcel/optimizer-svgo": "2.12.0", + "@parcel/optimizer-swc": "2.12.0", + "@parcel/packager-css": "2.12.0", + "@parcel/packager-html": "2.12.0", + "@parcel/packager-js": "2.12.0", + "@parcel/packager-raw": "2.12.0", + "@parcel/packager-svg": "2.12.0", + "@parcel/packager-wasm": "2.12.0", + "@parcel/reporter-dev-server": "2.12.0", + "@parcel/resolver-default": "2.12.0", + "@parcel/runtime-browser-hmr": "2.12.0", + "@parcel/runtime-js": "2.12.0", + "@parcel/runtime-react-refresh": "2.12.0", + "@parcel/runtime-service-worker": "2.12.0", + "@parcel/transformer-babel": "2.12.0", + "@parcel/transformer-css": "2.12.0", + "@parcel/transformer-html": "2.12.0", + "@parcel/transformer-image": "2.12.0", + "@parcel/transformer-js": "2.12.0", + "@parcel/transformer-json": "2.12.0", + "@parcel/transformer-postcss": "2.12.0", + "@parcel/transformer-posthtml": "2.12.0", + "@parcel/transformer-raw": "2.12.0", + "@parcel/transformer-react-refresh-wrap": "2.12.0", + "@parcel/transformer-svg": "2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "peerDependencies": { + "@parcel/core": "^2.12.0" + } + }, + "node_modules/@parcel/core": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/core/-/core-2.12.0.tgz", + "integrity": "sha512-s+6pwEj+GfKf7vqGUzN9iSEPueUssCCQrCBUlcAfKrJe0a22hTUCjewpB0I7lNrCIULt8dkndD+sMdOrXsRl6Q==", + "dependencies": { + "@mischnic/json-sourcemap": "^0.1.0", + "@parcel/cache": "2.12.0", + "@parcel/diagnostic": "2.12.0", + "@parcel/events": "2.12.0", + "@parcel/fs": "2.12.0", + "@parcel/graph": "3.2.0", + "@parcel/logger": "2.12.0", + "@parcel/package-manager": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/profiler": "2.12.0", + "@parcel/rust": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/types": "2.12.0", + "@parcel/utils": "2.12.0", + "@parcel/workers": "2.12.0", + "abortcontroller-polyfill": "^1.1.9", + "base-x": "^3.0.8", + "browserslist": "^4.6.6", + "clone": "^2.1.1", + "dotenv": "^7.0.0", + "dotenv-expand": "^5.1.0", + "json5": "^2.2.0", + "msgpackr": "^1.9.9", + "nullthrows": "^1.1.1", + "semver": "^7.5.2" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/core/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/diagnostic": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/diagnostic/-/diagnostic-2.12.0.tgz", + "integrity": "sha512-8f1NOsSFK+F4AwFCKynyIu9Kr/uWHC+SywAv4oS6Bv3Acig0gtwUjugk0C9UaB8ztBZiW5TQZhw+uPZn9T/lJA==", + "dependencies": { + "@mischnic/json-sourcemap": "^0.1.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/events": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/events/-/events-2.12.0.tgz", + "integrity": "sha512-nmAAEIKLjW1kB2cUbCYSmZOGbnGj8wCzhqnK727zCCWaA25ogzAtt657GPOeFyqW77KyosU728Tl63Fc8hphIA==", + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", 
+ "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/fs": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/fs/-/fs-2.12.0.tgz", + "integrity": "sha512-NnFkuvou1YBtPOhTdZr44WN7I60cGyly2wpHzqRl62yhObyi1KvW0SjwOMa0QGNcBOIzp4G0CapoZ93hD0RG5Q==", + "dependencies": { + "@parcel/rust": "2.12.0", + "@parcel/types": "2.12.0", + "@parcel/utils": "2.12.0", + "@parcel/watcher": "^2.0.7", + "@parcel/workers": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "peerDependencies": { + "@parcel/core": "^2.12.0" + } + }, + "node_modules/@parcel/graph": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@parcel/graph/-/graph-3.2.0.tgz", + "integrity": "sha512-xlrmCPqy58D4Fg5umV7bpwDx5Vyt7MlnQPxW68vae5+BA4GSWetfZt+Cs5dtotMG2oCHzZxhIPt7YZ7NRyQzLA==", + "dependencies": { + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/logger": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/logger/-/logger-2.12.0.tgz", + "integrity": "sha512-cJ7Paqa7/9VJ7C+KwgJlwMqTQBOjjn71FbKk0G07hydUEBISU2aDfmc/52o60ErL9l+vXB26zTrIBanbxS8rVg==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/events": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/markdown-ansi": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/markdown-ansi/-/markdown-ansi-2.12.0.tgz", + "integrity": "sha512-WZz3rzL8k0H3WR4qTHX6Ic8DlEs17keO9gtD4MNGyMNQbqQEvQ61lWJaIH0nAtgEetu0SOITiVqdZrb8zx/M7w==", + "dependencies": { + "chalk": "^4.1.0" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/markdown-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@parcel/markdown-ansi/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@parcel/markdown-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@parcel/markdown-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/@parcel/markdown-ansi/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@parcel/markdown-ansi/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@parcel/namer-default": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/namer-default/-/namer-default-2.12.0.tgz", + "integrity": "sha512-9DNKPDHWgMnMtqqZIMiEj/R9PNWW16lpnlHjwK3ciRlMPgjPJ8+UNc255teZODhX0T17GOzPdGbU/O/xbxVPzA==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/node-resolver-core": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/@parcel/node-resolver-core/-/node-resolver-core-3.3.0.tgz", + "integrity": "sha512-rhPW9DYPEIqQBSlYzz3S0AjXxjN6Ub2yS6tzzsW/4S3Gpsgk/uEq4ZfxPvoPf/6TgZndVxmKwpmxaKtGMmf3cA==", + "dependencies": { + "@mischnic/json-sourcemap": "^0.1.0", + "@parcel/diagnostic": "2.12.0", + "@parcel/fs": "2.12.0", + "@parcel/rust": "2.12.0", + "@parcel/utils": "2.12.0", + "nullthrows": "^1.1.1", + "semver": "^7.5.2" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/node-resolver-core/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/optimizer-css": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/optimizer-css/-/optimizer-css-2.12.0.tgz", + "integrity": "sha512-ifbcC97fRzpruTjaa8axIFeX4MjjSIlQfem3EJug3L2AVqQUXnM1XO8L0NaXGNLTW2qnh1ZjIJ7vXT/QhsphsA==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/utils": "2.12.0", + "browserslist": "^4.6.6", + "lightningcss": "^1.22.1", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/optimizer-htmlnano": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/optimizer-htmlnano/-/optimizer-htmlnano-2.12.0.tgz", + "integrity": "sha512-MfPMeCrT8FYiOrpFHVR+NcZQlXAptK2r4nGJjfT+ndPBhEEZp4yyL7n1y7HfX9geg5altc4WTb4Gug7rCoW8VQ==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "htmlnano": "^2.0.0", + "nullthrows": "^1.1.1", + "posthtml": "^0.16.5", + "svgo": "^2.4.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/optimizer-image": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/optimizer-image/-/optimizer-image-2.12.0.tgz", + "integrity": "sha512-bo1O7raeAIbRU5nmNVtx8divLW9Xqn0c57GVNGeAK4mygnQoqHqRZ0mR9uboh64pxv6ijXZHPhKvU9HEpjPjBQ==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/rust": "2.12.0", + "@parcel/utils": "2.12.0", + "@parcel/workers": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "peerDependencies": { + "@parcel/core": "^2.12.0" + } + }, + "node_modules/@parcel/optimizer-svgo": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/optimizer-svgo/-/optimizer-svgo-2.12.0.tgz", + "integrity": "sha512-Kyli+ZZXnoonnbeRQdoWwee9Bk2jm/49xvnfb+2OO8NN0d41lblBoRhOyFiScRnJrw7eVl1Xrz7NTkXCIO7XFQ==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0", + "svgo": "^2.4.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/optimizer-swc": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/optimizer-swc/-/optimizer-swc-2.12.0.tgz", + "integrity": "sha512-iBi6LZB3lm6WmbXfzi8J3DCVPmn4FN2lw7DGXxUXu7MouDPVWfTsM6U/5TkSHJRNRogZ2gqy5q9g34NPxHbJcw==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/utils": "2.12.0", + "@swc/core": "^1.3.36", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/package-manager": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/package-manager/-/package-manager-2.12.0.tgz", + "integrity": "sha512-0nvAezcjPx9FT+hIL+LS1jb0aohwLZXct7jAh7i0MLMtehOi0z1Sau+QpgMlA9rfEZZ1LIeFdnZZwqSy7Ccspw==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/fs": "2.12.0", + "@parcel/logger": "2.12.0", + "@parcel/node-resolver-core": "3.3.0", + "@parcel/types": "2.12.0", + "@parcel/utils": "2.12.0", + "@parcel/workers": "2.12.0", + "@swc/core": "^1.3.36", + "semver": "^7.5.2" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "peerDependencies": { + "@parcel/core": "^2.12.0" + } + }, + "node_modules/@parcel/package-manager/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/packager-css": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/packager-css/-/packager-css-2.12.0.tgz", + "integrity": "sha512-j3a/ODciaNKD19IYdWJT+TP+tnhhn5koBGBWWtrKSu0UxWpnezIGZetit3eE+Y9+NTePalMkvpIlit2eDhvfJA==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/utils": "2.12.0", + "lightningcss": "^1.22.1", + "nullthrows": "^1.1.1" + }, + 
"engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/packager-html": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/packager-html/-/packager-html-2.12.0.tgz", + "integrity": "sha512-PpvGB9hFFe+19NXGz2ApvPrkA9GwEqaDAninT+3pJD57OVBaxB8U+HN4a5LICKxjUppPPqmrLb6YPbD65IX4RA==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/types": "2.12.0", + "@parcel/utils": "2.12.0", + "nullthrows": "^1.1.1", + "posthtml": "^0.16.5" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/packager-js": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/packager-js/-/packager-js-2.12.0.tgz", + "integrity": "sha512-viMF+FszITRRr8+2iJyk+4ruGiL27Y6AF7hQ3xbJfzqnmbOhGFtLTQwuwhOLqN/mWR2VKdgbLpZSarWaO3yAMg==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/rust": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/types": "2.12.0", + "@parcel/utils": "2.12.0", + "globals": "^13.2.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/packager-js/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@parcel/packager-js/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@parcel/packager-raw": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/packager-raw/-/packager-raw-2.12.0.tgz", + "integrity": "sha512-tJZqFbHqP24aq1F+OojFbQIc09P/u8HAW5xfndCrFnXpW4wTgM3p03P0xfw3gnNq+TtxHJ8c3UFE5LnXNNKhYA==", + "dependencies": { + "@parcel/plugin": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/packager-svg": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/packager-svg/-/packager-svg-2.12.0.tgz", + "integrity": "sha512-ldaGiacGb2lLqcXas97k8JiZRbAnNREmcvoY2W2dvW4loVuDT9B9fU777mbV6zODpcgcHWsLL3lYbJ5Lt3y9cg==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/types": "2.12.0", + "@parcel/utils": "2.12.0", + "posthtml": "^0.16.4" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/packager-wasm": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/packager-wasm/-/packager-wasm-2.12.0.tgz", + "integrity": 
"sha512-fYqZzIqO9fGYveeImzF8ll6KRo2LrOXfD+2Y5U3BiX/wp9wv17dz50QLDQm9hmTcKGWxK4yWqKQh+Evp/fae7A==", + "dependencies": { + "@parcel/plugin": "2.12.0" + }, + "engines": { + "node": ">=12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/plugin": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/plugin/-/plugin-2.12.0.tgz", + "integrity": "sha512-nc/uRA8DiMoe4neBbzV6kDndh/58a4wQuGKw5oEoIwBCHUvE2W8ZFSu7ollSXUGRzfacTt4NdY8TwS73ScWZ+g==", + "dependencies": { + "@parcel/types": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/profiler": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/profiler/-/profiler-2.12.0.tgz", + "integrity": "sha512-q53fvl5LDcFYzMUtSusUBZSjQrKjMlLEBgKeQHFwkimwR1mgoseaDBDuNz0XvmzDzF1UelJ02TUKCGacU8W2qA==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/events": "2.12.0", + "chrome-trace-event": "^1.0.2" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/reporter-cli": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/reporter-cli/-/reporter-cli-2.12.0.tgz", + "integrity": "sha512-TqKsH4GVOLPSCanZ6tcTPj+rdVHERnt5y4bwTM82cajM21bCX1Ruwp8xOKU+03091oV2pv5ieB18pJyRF7IpIw==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/types": "2.12.0", + "@parcel/utils": "2.12.0", + "chalk": "^4.1.0", + "term-size": "^2.2.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/reporter-cli/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@parcel/reporter-cli/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@parcel/reporter-cli/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@parcel/reporter-cli/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/@parcel/reporter-cli/node_modules/has-flag": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@parcel/reporter-cli/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@parcel/reporter-dev-server": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/reporter-dev-server/-/reporter-dev-server-2.12.0.tgz", + "integrity": "sha512-tIcDqRvAPAttRlTV28dHcbWT5K2r/MBFks7nM4nrEDHWtnrCwimkDmZTc1kD8QOCCjGVwRHcQybpHvxfwol6GA==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/reporter-tracer": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/reporter-tracer/-/reporter-tracer-2.12.0.tgz", + "integrity": "sha512-g8rlu9GxB8Ut/F8WGx4zidIPQ4pcYFjU9bZO+fyRIPrSUFH2bKijCnbZcr4ntqzDGx74hwD6cCG4DBoleq2UlQ==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0", + "chrome-trace-event": "^1.0.3", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/resolver-default": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/resolver-default/-/resolver-default-2.12.0.tgz", + "integrity": "sha512-uuhbajTax37TwCxu7V98JtRLiT6hzE4VYSu5B7Qkauy14/WFt2dz6GOUXPgVsED569/hkxebPx3KCMtZW6cHHA==", + "dependencies": { + "@parcel/node-resolver-core": "3.3.0", + "@parcel/plugin": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/runtime-browser-hmr": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/runtime-browser-hmr/-/runtime-browser-hmr-2.12.0.tgz", + "integrity": "sha512-4ZLp2FWyD32r0GlTulO3+jxgsA3oO1P1b5oO2IWuWilfhcJH5LTiazpL5YdusUjtNn9PGN6QLAWfxmzRIfM+Ow==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/runtime-js": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/runtime-js/-/runtime-js-2.12.0.tgz", + "integrity": "sha512-sBerP32Z1crX5PfLNGDSXSdqzlllM++GVnVQVeM7DgMKS8JIFG3VLi28YkX+dYYGtPypm01JoIHCkvwiZEcQJg==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/runtime-react-refresh": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/runtime-react-refresh/-/runtime-react-refresh-2.12.0.tgz", + "integrity": 
"sha512-SCHkcczJIDFTFdLTzrHTkQ0aTrX3xH6jrA4UsCBL6ji61+w+ohy4jEEe9qCgJVXhnJfGLE43HNXek+0MStX+Mw==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0", + "react-error-overlay": "6.0.9", + "react-refresh": "^0.9.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/runtime-react-refresh/node_modules/react-refresh": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.9.0.tgz", + "integrity": "sha512-Gvzk7OZpiqKSkxsQvO/mbTN1poglhmAV7gR/DdIrRrSMXraRQQlfikRJOr3Nb9GTMPC5kof948Zy6jJZIFtDvQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@parcel/runtime-service-worker": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/runtime-service-worker/-/runtime-service-worker-2.12.0.tgz", + "integrity": "sha512-BXuMBsfiwpIEnssn+jqfC3jkgbS8oxeo3C7xhSQsuSv+AF2FwY3O3AO1c1RBskEW3XrBLNINOJujroNw80VTKA==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/rust": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/rust/-/rust-2.12.0.tgz", + "integrity": "sha512-005cldMdFZFDPOjbDVEXcINQ3wT4vrxvSavRWI3Az0e3E18exO/x/mW9f648KtXugOXMAqCEqhFHcXECL9nmMw==", + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/source-map": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@parcel/source-map/-/source-map-2.1.1.tgz", + "integrity": "sha512-Ejx1P/mj+kMjQb8/y5XxDUn4reGdr+WyKYloBljpppUy8gs42T+BNoEOuRYqDVdgPc6NxduzIDoJS9pOFfV5Ew==", + "dependencies": { + "detect-libc": "^1.0.3" + }, + "engines": { + "node": "^12.18.3 || >=14" + } + }, + "node_modules/@parcel/transformer-babel": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-babel/-/transformer-babel-2.12.0.tgz", + "integrity": "sha512-zQaBfOnf/l8rPxYGnsk/ufh/0EuqvmnxafjBIpKZ//j6rGylw5JCqXSb1QvvAqRYruKeccxGv7+HrxpqKU6V4A==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/utils": "2.12.0", + "browserslist": "^4.6.6", + "json5": "^2.2.0", + "nullthrows": "^1.1.1", + "semver": "^7.5.2" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/transformer-babel/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/transformer-css": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-css/-/transformer-css-2.12.0.tgz", + "integrity": "sha512-vXhOqoAlQGATYyQ433Z1DXKmiKmzOAUmKysbYH3FD+LKEKLMEl/pA14goqp00TW+A/EjtSKKyeMyHlMIIUqj4Q==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/utils": "2.12.0", + 
"browserslist": "^4.6.6", + "lightningcss": "^1.22.1", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/transformer-html": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-html/-/transformer-html-2.12.0.tgz", + "integrity": "sha512-5jW4dFFBlYBvIQk4nrH62rfA/G/KzVzEDa6S+Nne0xXhglLjkm64Ci9b/d4tKZfuGWUbpm2ASAq8skti/nfpXw==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/rust": "2.12.0", + "nullthrows": "^1.1.1", + "posthtml": "^0.16.5", + "posthtml-parser": "^0.10.1", + "posthtml-render": "^3.0.0", + "semver": "^7.5.2", + "srcset": "4" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/transformer-html/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/transformer-image": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-image/-/transformer-image-2.12.0.tgz", + "integrity": "sha512-8hXrGm2IRII49R7lZ0RpmNk27EhcsH+uNKsvxuMpXPuEnWgC/ha/IrjaI29xCng1uGur74bJF43NUSQhR4aTdw==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0", + "@parcel/workers": "2.12.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "peerDependencies": { + "@parcel/core": "^2.12.0" + } + }, + "node_modules/@parcel/transformer-js": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-js/-/transformer-js-2.12.0.tgz", + "integrity": "sha512-OSZpOu+FGDbC/xivu24v092D9w6EGytB3vidwbdiJ2FaPgfV7rxS0WIUjH4I0OcvHAcitArRXL0a3+HrNTdQQw==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/rust": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/utils": "2.12.0", + "@parcel/workers": "2.12.0", + "@swc/helpers": "^0.5.0", + "browserslist": "^4.6.6", + "nullthrows": "^1.1.1", + "regenerator-runtime": "^0.13.7", + "semver": "^7.5.2" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "peerDependencies": { + "@parcel/core": "^2.12.0" + } + }, + "node_modules/@parcel/transformer-js/node_modules/regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + }, + "node_modules/@parcel/transformer-js/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/transformer-json": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-json/-/transformer-json-2.12.0.tgz", + 
"integrity": "sha512-Utv64GLRCQILK5r0KFs4o7I41ixMPllwOLOhkdjJKvf1hZmN6WqfOmB1YLbWS/y5Zb/iB52DU2pWZm96vLFQZQ==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "json5": "^2.2.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/transformer-postcss": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-postcss/-/transformer-postcss-2.12.0.tgz", + "integrity": "sha512-FZqn+oUtiLfPOn67EZxPpBkfdFiTnF4iwiXPqvst3XI8H+iC+yNgzmtJkunOOuylpYY6NOU5jT8d7saqWSDv2Q==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/rust": "2.12.0", + "@parcel/utils": "2.12.0", + "clone": "^2.1.1", + "nullthrows": "^1.1.1", + "postcss-value-parser": "^4.2.0", + "semver": "^7.5.2" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/transformer-postcss/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/transformer-posthtml": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-posthtml/-/transformer-posthtml-2.12.0.tgz", + "integrity": "sha512-z6Z7rav/pcaWdeD+2sDUcd0mmNZRUvtHaUGa50Y2mr+poxrKilpsnFMSiWBT+oOqPt7j71jzDvrdnAF4XkCljg==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0", + "nullthrows": "^1.1.1", + "posthtml": "^0.16.5", + "posthtml-parser": "^0.10.1", + "posthtml-render": "^3.0.0", + "semver": "^7.5.2" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/transformer-posthtml/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/transformer-raw": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-raw/-/transformer-raw-2.12.0.tgz", + "integrity": "sha512-Ht1fQvXxix0NncdnmnXZsa6hra20RXYh1VqhBYZLsDfkvGGFnXIgO03Jqn4Z8MkKoa0tiNbDhpKIeTjyclbBxQ==", + "dependencies": { + "@parcel/plugin": "2.12.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/transformer-react-refresh-wrap": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-react-refresh-wrap/-/transformer-react-refresh-wrap-2.12.0.tgz", + "integrity": "sha512-GE8gmP2AZtkpBIV5vSCVhewgOFRhqwdM5Q9jNPOY5PKcM3/Ff0qCqDiTzzGLhk0/VMBrdjssrfZkVx6S/lHdJw==", + "dependencies": { + "@parcel/plugin": "2.12.0", + "@parcel/utils": "2.12.0", + "react-refresh": "^0.9.0" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + 
"node_modules/@parcel/transformer-react-refresh-wrap/node_modules/react-refresh": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.9.0.tgz", + "integrity": "sha512-Gvzk7OZpiqKSkxsQvO/mbTN1poglhmAV7gR/DdIrRrSMXraRQQlfikRJOr3Nb9GTMPC5kof948Zy6jJZIFtDvQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@parcel/transformer-svg": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/transformer-svg/-/transformer-svg-2.12.0.tgz", + "integrity": "sha512-cZJqGRJ4JNdYcb+vj94J7PdOuTnwyy45dM9xqbIMH+HSiiIkfrMsdEwYft0GTyFTdsnf+hdHn3tau7Qa5hhX+A==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/plugin": "2.12.0", + "@parcel/rust": "2.12.0", + "nullthrows": "^1.1.1", + "posthtml": "^0.16.5", + "posthtml-parser": "^0.10.1", + "posthtml-render": "^3.0.0", + "semver": "^7.5.2" + }, + "engines": { + "node": ">= 12.0.0", + "parcel": "^2.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/transformer-svg/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@parcel/types": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/types/-/types-2.12.0.tgz", + "integrity": "sha512-8zAFiYNCwNTQcglIObyNwKfRYQK5ELlL13GuBOrSMxueUiI5ylgsGbTS1N7J3dAGZixHO8KhHGv5a71FILn9rQ==", + "dependencies": { + "@parcel/cache": "2.12.0", + "@parcel/diagnostic": "2.12.0", + "@parcel/fs": "2.12.0", + "@parcel/package-manager": "2.12.0", + "@parcel/source-map": "^2.1.1", + "@parcel/workers": "2.12.0", + "utility-types": "^3.10.0" + } + }, + "node_modules/@parcel/utils": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/utils/-/utils-2.12.0.tgz", + "integrity": "sha512-z1JhLuZ8QmDaYoEIuUCVZlhcFrS7LMfHrb2OCRui5SQFntRWBH2fNM6H/fXXUkT9SkxcuFP2DUA6/m4+Gkz72g==", + "dependencies": { + "@parcel/codeframe": "2.12.0", + "@parcel/diagnostic": "2.12.0", + "@parcel/logger": "2.12.0", + "@parcel/markdown-ansi": "2.12.0", + "@parcel/rust": "2.12.0", + "@parcel/source-map": "^2.1.1", + "chalk": "^4.1.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/utils/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@parcel/utils/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@parcel/utils/node_modules/color-convert": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@parcel/utils/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/@parcel/utils/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@parcel/utils/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@parcel/watcher": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.4.1.tgz", + "integrity": "sha512-HNjmfLQEVRZmHRET336f20H/8kOozUGwk7yajvsonjNxbj2wBTK1WsQuHkD5yYh9RxFGL2EyDHryOihOwUoKDA==", + "dependencies": { + "detect-libc": "^1.0.3", + "is-glob": "^4.0.3", + "micromatch": "^4.0.5", + "node-addon-api": "^7.0.0" + }, + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "@parcel/watcher-android-arm64": "2.4.1", + "@parcel/watcher-darwin-arm64": "2.4.1", + "@parcel/watcher-darwin-x64": "2.4.1", + "@parcel/watcher-freebsd-x64": "2.4.1", + "@parcel/watcher-linux-arm-glibc": "2.4.1", + "@parcel/watcher-linux-arm64-glibc": "2.4.1", + "@parcel/watcher-linux-arm64-musl": "2.4.1", + "@parcel/watcher-linux-x64-glibc": "2.4.1", + "@parcel/watcher-linux-x64-musl": "2.4.1", + "@parcel/watcher-win32-arm64": "2.4.1", + "@parcel/watcher-win32-ia32": "2.4.1", + "@parcel/watcher-win32-x64": "2.4.1" + } + }, + "node_modules/@parcel/watcher-android-arm64": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.4.1.tgz", + "integrity": "sha512-LOi/WTbbh3aTn2RYddrO8pnapixAziFl6SMxHM69r3tvdSm94JtCenaKgk1GRg5FJ5wpMCpHeW+7yqPlvZv7kg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-arm64": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.4.1.tgz", + "integrity": "sha512-ln41eihm5YXIY043vBrrHfn94SIBlqOWmoROhsMVTSXGh0QahKGy77tfEywQ7v3NywyxBBkGIfrWRHm0hsKtzA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-x64": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.4.1.tgz", + "integrity": 
"sha512-yrw81BRLjjtHyDu7J61oPuSoeYWR3lDElcPGJyOvIXmor6DEo7/G2u1o7I38cwlcoBHQFULqF6nesIX3tsEXMg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-freebsd-x64": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.4.1.tgz", + "integrity": "sha512-TJa3Pex/gX3CWIx/Co8k+ykNdDCLx+TuZj3f3h7eOjgpdKM+Mnix37RYsYU4LHhiYJz3DK5nFCCra81p6g050w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-glibc": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.4.1.tgz", + "integrity": "sha512-4rVYDlsMEYfa537BRXxJ5UF4ddNwnr2/1O4MHM5PjI9cvV2qymvhwZSFgXqbS8YoTk5i/JR0L0JDs69BUn45YA==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-glibc": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.4.1.tgz", + "integrity": "sha512-BJ7mH985OADVLpbrzCLgrJ3TOpiZggE9FMblfO65PlOCdG++xJpKUJ0Aol74ZUIYfb8WsRlUdgrZxKkz3zXWYA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-musl": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.4.1.tgz", + "integrity": "sha512-p4Xb7JGq3MLgAfYhslU2SjoV9G0kI0Xry0kuxeG/41UfpjHGOhv7UoUDAz/jb1u2elbhazy4rRBL8PegPJFBhA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-glibc": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.4.1.tgz", + "integrity": "sha512-s9O3fByZ/2pyYDPoLM6zt92yu6P4E39a03zvO0qCHOTjxmt3GHRMLuRZEWhWLASTMSrrnVNWdVI/+pUElJBBBg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-musl": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.4.1.tgz", + "integrity": "sha512-L2nZTYR1myLNST0O632g0Dx9LyMNHrn6TOt76sYxWLdff3cB22/GZX2UPtJnaqQPdCRoszoY5rcOj4oMTtp5fQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-arm64": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.4.1.tgz", + "integrity": 
"sha512-Uq2BPp5GWhrq/lcuItCHoqxjULU1QYEcyjSO5jqqOK8RNFDBQnenMMx4gAl3v8GiWa59E9+uDM7yZ6LxwUIfRg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-ia32": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.4.1.tgz", + "integrity": "sha512-maNRit5QQV2kgHFSYwftmPBxiuK5u4DXjbXx7q6eKjq5dsLXZ4FJiVvlcw35QXzk0KrUecJmuVFbj4uV9oYrcw==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-x64": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.4.1.tgz", + "integrity": "sha512-+DvS92F9ezicfswqrvIRM2njcYJbd5mb9CUgtrHCHmvn7pPPa+nMDRu1o1bYYz/l5IB2NVGNJWiH7h1E58IF2A==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/workers": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/@parcel/workers/-/workers-2.12.0.tgz", + "integrity": "sha512-zv5We5Jmb+ZWXlU6A+AufyjY4oZckkxsZ8J4dvyWL0W8IQvGO1JB4FGeryyttzQv3RM3OxcN/BpTGPiDG6keBw==", + "dependencies": { + "@parcel/diagnostic": "2.12.0", + "@parcel/logger": "2.12.0", + "@parcel/profiler": "2.12.0", + "@parcel/types": "2.12.0", + "@parcel/utils": "2.12.0", + "nullthrows": "^1.1.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "peerDependencies": { + "@parcel/core": "^2.12.0" + } + }, + "node_modules/@popperjs/core": { + "version": "2.11.8", + "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz", + "integrity": "sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/popperjs" + } + }, + "node_modules/@radix-ui/number": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.0.1.tgz", + "integrity": "sha512-T5gIdVO2mmPW3NNhjNgEP3cqMXjXL9UbO0BzWcXfvdBs+BohbQxvd/K5hSVKmn9/lbTdsQVKbUcP5WLCwvUbBg==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@radix-ui/primitive": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.0.1.tgz", + "integrity": "sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@radix-ui/react-accessible-icon": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-accessible-icon/-/react-accessible-icon-1.0.3.tgz", + "integrity": "sha512-duVGKeWPSUILr/MdlPxV+GeULTc2rS1aihGdQ3N2qCUPMgxYLxvAsHJM3mCVLF8d5eK+ympmB22mb1F3a5biNw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-visually-hidden": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true 
+ }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.0.3.tgz", + "integrity": "sha512-wSP+pHsB/jQRaL6voubsQ/ZlrGBHHrOjmBnr19hxYgtS0WvAFwZhK2WP/YY5yF9uKECCEEDGxuLxq1NBK51wFA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collapsible": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.0.3.tgz", + "integrity": "sha512-UBmVDkmR6IvDsloHVN+3rtx4Mi5TFvylYXpluuv0f37dtaz3H99bp8No0LGXRigVpl3UAT4l9j6bIchh42S/Gg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.0.3.tgz", + "integrity": "sha512-3SzW+0PW7yBBoQlT8wNcGtaxaD0XSu0uLUFgrtHY08Acx05TaHaOmVLR73c0j/cqpDy53KBMO7s0dx2wmOIDIA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.0.1.tgz", + "integrity": "sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.0.1.tgz", + "integrity": "sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog": { + "version": "1.0.5", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.0.5.tgz", + "integrity": "sha512-GjWJX/AUpB703eEBanuBnIWdIXg6NvJFCXcNlSZk4xdszCdhrJgBoUd1cGk67vFO+WdA2pfI/plOpqz/5GUP6Q==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.5", + "@radix-ui/react-focus-guards": "1.0.1", + "@radix-ui/react-focus-scope": "1.0.4", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-portal": "1.0.4", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-controllable-state": "1.0.1", + "aria-hidden": "^1.1.1", + "react-remove-scroll": "2.5.5" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.0.1.tgz", + "integrity": "sha512-RXcvnXgyvYvBEOhCBuddKecVkoMiI10Jcm5cTI7abJRAHYfFxeu+FBQs/DvdxSYucxR5mna0dNsL6QFlds5TMA==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.5.tgz", + "integrity": "sha512-aJeDjQhywg9LBu2t/At58hCvr7pEm0o2Ke1x33B+MhjNmmZ17sy4KImo0KPLgsnc/zN7GPdce8Cnn0SWvwZO7g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-escape-keydown": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.0.1.tgz", + "integrity": "sha512-Rect2dWbQ8waGzhMavsIbmSVCgYxkXLxxR3ZvCX79JOglzdEy4JXMb98lq4hPxUbLr77nP0UOGf4rcMU+s1pUA==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.4.tgz", + "integrity": "sha512-sL04Mgvf+FmyvZeYfNu1EPAaaxD+aw7cYeIB9L9Fvq8+urhltTRaEo5ysKOpHuKPclsZcSUMKlN05x4u+CINpA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + 
"react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.0.1.tgz", + "integrity": "sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.0.7.tgz", + "integrity": "sha512-shtvVnlsxT6faMnK/a7n0wptwBD23xc1Z5mdrtKLwVEfsEMXodS0r5s0/g5P0hX//EKYZS2sxUjqfzlg52ZSnQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.5", + "@radix-ui/react-focus-guards": "1.0.1", + "@radix-ui/react-focus-scope": "1.0.4", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.3", + "@radix-ui/react-portal": "1.0.4", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-controllable-state": "1.0.1", + "aria-hidden": "^1.1.1", + "react-remove-scroll": "2.5.5" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.3.tgz", + "integrity": "sha512-cKpopj/5RHZWjrbF2846jBNacjQVwkP068DfmgrNJXpvVWrOvlAmE9xSiy5OqeE+Gi8D9fP+oDhUnPqNMY8/5w==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1", + "@radix-ui/react-use-rect": "1.0.1", + "@radix-ui/react-use-size": "1.0.1", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.4.tgz", + "integrity": "sha512-Qki+C/EuGUVCQTOTD5vzJzJuMUlewbzuKyUy+/iHM2uwGiru9gZeBJtHAPKAEkB5KWGi9mP/CHKcY0wt1aW45Q==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + 
} + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.0.1.tgz", + "integrity": "sha512-UXLW4UAbIY5ZjcvzjfRFo5gxva8QirC9hF7wRE4U5gz+TP0DbRk+//qyuAQ1McDxBt1xNMBTaciFGvEmJvAZCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-1.0.3.tgz", + "integrity": "sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-slot": "1.0.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-progress/-/react-progress-1.1.0.tgz", + "integrity": "sha512-aSzvnYpP725CROcxAOEBVZZSIQVQdHgBr2QQFKySsaD14u8dNT0batuXI+AAGDdAHfXH8rbnHmjYFqVJ21KkRg==", + "dependencies": { + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-primitive": "2.0.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.0.tgz", + "integrity": "sha512-b4inOtiaOnYf9KWyO3jAeeCG6FeyfY6ldiEPanbUjWd+xIk5wZeHa8yVwmrJ2vderhu/BQvzCrJI0lHd+wIiqw==", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-context": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.0.tgz", + "integrity": "sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-primitive": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.0.tgz", + "integrity": "sha512-ZSpFm0/uHa8zTvKBDjLFWLo8dkr4MBsiDLz0g3gMUwqgLHz9rTaRRGYDgvZPtBJgYCBKXkS9fzmoySgr8CO6Cw==", + "dependencies": { + "@radix-ui/react-slot": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + 
"@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.0.4.tgz", + "integrity": "sha512-2mUg5Mgcu001VkGy+FfzZyzbmuUWzgWkj3rvv4yu+mLw03+mTzbxZHvfcGyFp2b8EkQeMkpRQ5FiA2Vr2O6TeQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-collection": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-direction": "1.0.1", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-controllable-state": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-1.2.2.tgz", + "integrity": "sha512-zI7McXr8fNaSrUY9mZe4x/HC0jTLY9fWNhO1oLWYMQGDXuV4UCivIGTxwioSzO0ZCYX9iSLyWmAh/1TOmX3Cnw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/number": "1.0.1", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-collection": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-direction": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-focus-guards": "1.0.1", + "@radix-ui/react-focus-scope": "1.0.3", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.2", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1", + "@radix-ui/react-use-previous": "1.0.1", + "@radix-ui/react-visually-hidden": "1.0.3", + "aria-hidden": "^1.1.1", + "react-remove-scroll": "2.5.5" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.4.tgz", + "integrity": 
"sha512-7UpBa/RKMoHJYjie1gkF1DlK8l1fdU/VKDpoS3rCCo8YBJR294GwcEHyxHw72yvphJ7ld0AXEcSLAzY2F/WyCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-escape-keydown": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-focus-scope": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.3.tgz", + "integrity": "sha512-upXdPfqI4islj2CslyfUBNlaJCPybbqRHAi1KER7Isel9Q2AtSJ0zRBZv8mWQiFXD2nyAJ4BhC3yXgZ6kMBSrQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-popper": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.2.tgz", + "integrity": "sha512-1CnGGfFi/bbqtJZZ0P/NQY20xdG3E0LALJaLUEoKwPLwl6PPPfbeiCqMVQnhoFRAxjJj4RpBRJzDmUgsex2tSg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1", + "@radix-ui/react-use-rect": "1.0.1", + "@radix-ui/react-use-size": "1.0.1", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-portal": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.3.tgz", + "integrity": "sha512-xLYZeHrWoPmA5mEKEfZZevoVRK/Q43GfzRXkWV6qawIWWK8t6ifIiLQdd7rmQ4Vk1bmI21XhqF9BN3jWf+phpA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.0.2.tgz", + "integrity": "sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1" + }, + 
"peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-switch": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.0.3.tgz", + "integrity": "sha512-mxm87F88HyHztsI7N+ZUmEoARGkC22YVW5CaC+Byc+HRpuvCrOBPTAnXgf+tZ/7i0Sg/eOePGdMhUKhPaQEqow==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-use-previous": "1.0.1", + "@radix-ui/react-use-size": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.0.4.tgz", + "integrity": "sha512-egZfYY/+wRNCflXNHx+dePvnz9FbmssDTJBtgRfDY7e8SE5oIo3Py2eCB1ckAbh1Q7cQ/6yJZThJ++sgbxibog==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-direction": "1.0.1", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-roving-focus": "1.0.4", + "@radix-ui/react-use-controllable-state": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.1.5.tgz", + "integrity": "sha512-fRLn227WHIBRSzuRzGJ8W+5YALxofH23y0MlPLddaIpLpCDqdE0NZlS2NRQDRiptfxDeeCjgFIpexB1/zkxDlw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-collection": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.5", + "@radix-ui/react-portal": "1.0.4", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1", + "@radix-ui/react-visually-hidden": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.7.tgz", + "integrity": "sha512-lPh5iKNFVQ/jav/j6ZrWq3blfDJ0OH9R6FlNUHPMqdLuQ9vwDgFsRxvl8b7Asuy5c8xmoojHUxKHQSOAvMHxyw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": 
"1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.5", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.3", + "@radix-ui/react-portal": "1.0.4", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-visually-hidden": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.0.1.tgz", + "integrity": "sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.0.1.tgz", + "integrity": "sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.0.3.tgz", + "integrity": "sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.0.1.tgz", + "integrity": "sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-previous": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.0.1.tgz", + "integrity": "sha512-cV5La9DPwiQ7S0gf/0qiD6YgNqM5Fk97Kdrlc5yBcrF3jyEZQwm7vYFqMo4IfeHgJXsRaMvLABFtd0OVEmZhDw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.0.1.tgz", + "integrity": "sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.0.1.tgz", + "integrity": "sha512-ibay+VqrgcaI6veAojjofPATwledXiSmX+C0KrBk/xgpX9rBzPV3OsfwlhQdUOFbh+LKQorLYT+xTXW9V8yd0g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.0.3.tgz", + "integrity": "sha512-D4w41yN5YRKtu464TLnByKzMDG/JlMPHtfZgQAu9v6mNakUqGUI9vUrfQKz8NK41VMm/xbZbh76NUTVtIYqOMA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.0.1.tgz", + "integrity": "sha512-fyrgCaedtvMg9NK3en0pnOYJdtfwxUcNolezkNPUsoX57X8oQk+NkqcvzHXD2uKNij6GXmWU9NDru2IWjrO4BQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@react-dnd/asap": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@react-dnd/asap/-/asap-5.0.2.tgz", + "integrity": "sha512-WLyfoHvxhs0V9U+GTsGilGgf2QsPl6ZZ44fnv0/b8T3nQyvzxidxsg/ZltbWssbsRDlYW8UKSQMTGotuTotZ6A==" + }, + "node_modules/@react-dnd/invariant": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@react-dnd/invariant/-/invariant-4.0.2.tgz", + "integrity": "sha512-xKCTqAK/FFauOM9Ta2pswIyT3D8AQlfrYdOi/toTPEhqCuAs1v5tcJ3Y08Izh1cJ5Jchwy9SeAXmMg6zrKs2iw==" + }, + "node_modules/@react-dnd/shallowequal": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@react-dnd/shallowequal/-/shallowequal-4.0.2.tgz", + "integrity": "sha512-/RVXdLvJxLg4QKvMoM5WlwNR9ViO9z8B/qPcc+C0Sa/teJY7QG7kJ441DwzOjMYEY7GmU4dj5EcGHIkKZiQZCA==" + }, + "node_modules/@reactflow/background": { + "version": "11.3.13", + "resolved": "https://registry.npmjs.org/@reactflow/background/-/background-11.3.13.tgz", + "integrity": "sha512-hkvpVEhgvfTDyCvdlitw4ioKCYLaaiRXnuEG+1QM3Np+7N1DiWF1XOv5I8AFyNoJL07yXEkbECUTsHvkBvcG5A==", + "dependencies": { + "@reactflow/core": "11.11.3", + "classcat": "^5.0.3", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/controls": { + "version": "11.2.13", + "resolved": "https://registry.npmjs.org/@reactflow/controls/-/controls-11.2.13.tgz", + "integrity": "sha512-3xgEg6ALIVkAQCS4NiBjb7ad8Cb3D8CtA7Vvl4Hf5Ar2PIVs6FOaeft9s2iDZGtsWP35ECDYId1rIFVhQL8r+A==", + "dependencies": { + "@reactflow/core": "11.11.3", + 
"classcat": "^5.0.3", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/core": { + "version": "11.11.3", + "resolved": "https://registry.npmjs.org/@reactflow/core/-/core-11.11.3.tgz", + "integrity": "sha512-+adHdUa7fJSEM93fWfjQwyWXeI92a1eLKwWbIstoCakHpL8UjzwhEh6sn+mN2h/59MlVI7Ehr1iGTt3MsfcIFA==", + "dependencies": { + "@types/d3": "^7.4.0", + "@types/d3-drag": "^3.0.1", + "@types/d3-selection": "^3.0.3", + "@types/d3-zoom": "^3.0.1", + "classcat": "^5.0.3", + "d3-drag": "^3.0.0", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/minimap": { + "version": "11.7.13", + "resolved": "https://registry.npmjs.org/@reactflow/minimap/-/minimap-11.7.13.tgz", + "integrity": "sha512-m2MvdiGSyOu44LEcERDEl1Aj6x//UQRWo3HEAejNU4HQTlJnYrSN8tgrYF8TxC1+c/9UdyzQY5VYgrTwW4QWdg==", + "dependencies": { + "@reactflow/core": "11.11.3", + "@types/d3-selection": "^3.0.3", + "@types/d3-zoom": "^3.0.1", + "classcat": "^5.0.3", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/node-resizer": { + "version": "2.2.13", + "resolved": "https://registry.npmjs.org/@reactflow/node-resizer/-/node-resizer-2.2.13.tgz", + "integrity": "sha512-X7ceQ2s3jFLgbkg03n2RYr4hm3jTVrzkW2W/8ANv/SZfuVmF8XJxlERuD8Eka5voKqLda0ywIZGAbw9GoHLfUQ==", + "dependencies": { + "@reactflow/core": "11.11.3", + "classcat": "^5.0.4", + "d3-drag": "^3.0.0", + "d3-selection": "^3.0.0", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@reactflow/node-toolbar": { + "version": "1.3.13", + "resolved": "https://registry.npmjs.org/@reactflow/node-toolbar/-/node-toolbar-1.3.13.tgz", + "integrity": "sha512-aknvNICO10uWdthFSpgD6ctY/CTBeJUMV9co8T9Ilugr08Nb89IQ4uD0dPmr031ewMQxixtYIkw+sSDDzd2aaQ==", + "dependencies": { + "@reactflow/core": "11.11.3", + "classcat": "^5.0.3", + "zustand": "^4.4.1" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@remix-run/router": { + "version": "1.16.1", + "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.16.1.tgz", + "integrity": "sha512-es2g3dq6Nb07iFxGk5GuHN20RwBZOsuDQN7izWIisUcv9r+d2C5jQxqmgkdebXgReWfiyUabcki6Fg77mSNrig==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@rollup/pluginutils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.0.tgz", + "integrity": "sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==", + "dev": true, + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rtsao/scc": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rtsao/scc/-/scc-1.1.0.tgz", + "integrity": "sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rushstack/eslint-patch": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.10.3.tgz", + 
"integrity": "sha512-qC/xYId4NMebE6w/V33Fh9gWxLgURiNYgVNObbJl2LZv0GUUItCcCqC5axQSwRaAgaxl2mELq1rMzlswaQ0Zxg==", + "dev": true + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", + "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", + "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", + "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", + "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", + 
"integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", + "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", + "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", + "dev": true, + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", + "@svgr/babel-plugin-remove-jsx-attribute": "*", + "@svgr/babel-plugin-remove-jsx-empty-expression": "*", + "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", + "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", + "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", + "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", + "@svgr/babel-plugin-transform-svg-component": "^6.5.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", + "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "camelcase": "^6.2.0", + "cosmiconfig": "^7.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", + "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.20.0", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast/node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "dev": true, + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", + "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", + "dev": true, + 
"dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/hast-util-to-babel-ast": "^6.5.1", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "^6.0.0" + } + }, + "node_modules/@swc/core": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.5.28.tgz", + "integrity": "sha512-muCdNIqOTURUgYeyyOLYE3ShL8SZO6dw6bhRm6dCvxWzCZOncPc5fB0kjcPXTML+9KJoHL7ks5xg+vsQK+v6ig==", + "hasInstallScript": true, + "dependencies": { + "@swc/counter": "^0.1.3", + "@swc/types": "^0.1.8" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.5.28", + "@swc/core-darwin-x64": "1.5.28", + "@swc/core-linux-arm-gnueabihf": "1.5.28", + "@swc/core-linux-arm64-gnu": "1.5.28", + "@swc/core-linux-arm64-musl": "1.5.28", + "@swc/core-linux-x64-gnu": "1.5.28", + "@swc/core-linux-x64-musl": "1.5.28", + "@swc/core-win32-arm64-msvc": "1.5.28", + "@swc/core-win32-ia32-msvc": "1.5.28", + "@swc/core-win32-x64-msvc": "1.5.28" + }, + "peerDependencies": { + "@swc/helpers": "*" + }, + "peerDependenciesMeta": { + "@swc/helpers": { + "optional": true + } + } + }, + "node_modules/@swc/core-darwin-arm64": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.5.28.tgz", + "integrity": "sha512-sP6g63ybzIdOWNDbn51tyHN8EMt7Mb4RMeHQEsXB7wQfDvzhpWB+AbfK6Gs3Q8fwP/pmWIrWW9csKOc1K2Mmkg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-darwin-x64": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.5.28.tgz", + "integrity": "sha512-Bd/agp/g7QocQG5AuorOzSC78t8OzeN+pCN/QvJj1CvPhvppjJw6e1vAbOR8vO2vvGi2pvtf3polrYQStJtSiA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm-gnueabihf": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.5.28.tgz", + "integrity": "sha512-Wr3TwPGIveS9/OBWm0r9VAL8wkCR0zQn46J8K01uYCmVhUNK3Muxjs0vQBZaOrGu94mqbj9OXY+gB3W7aDvGdA==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-gnu": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.5.28.tgz", + "integrity": "sha512-8G1ZwVTuLgTAVTMPD+M97eU6WeiRIlGHwKZ5fiJHPBcz1xqIC7jQcEh7XBkobkYoU5OILotls3gzjRt8CMNyDQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-musl": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.5.28.tgz", + "integrity": "sha512-0Ajdzb5Fzvz+XUbN5ESeHAz9aHHSYiQcm+vmsDi0TtPHmsalfnqEPZmnK0zPALPJPLQP2dDo4hELeDg3/c3xgA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-gnu": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.5.28.tgz", + "integrity": 
"sha512-ueQ9VejnQUM2Pt+vT0IAKoF4vYBWUP6n1KHGdILpoGe3LuafQrqu7RoyQ15C7/AYii7hAeNhTFdf6gLbg8cjFg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-musl": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.5.28.tgz", + "integrity": "sha512-G5th8Mg0az8CbY4GQt9/m5hg2Y0kGIwvQBeVACuLQB6q2Y4txzdiTpjmFqUUhEvvl7Klyx1IHvNhfXs3zpt7PA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-arm64-msvc": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.5.28.tgz", + "integrity": "sha512-JezwCGavZ7CkNXx4yInI4kpb71L0zxzxA9BFlmnsGKEEjVQcKc3hFpmIzfFVs+eotlBUwDNb0+Yo9m6Cb7lllA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-ia32-msvc": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.5.28.tgz", + "integrity": "sha512-q8tW5J4RkOkl7vYShnWS//VAb2Ngolfm9WOMaF2GRJUr2Y/Xeb/+cNjdsNOqea2BzW049D5vdP7XPmir3/zUZw==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-x64-msvc": { + "version": "1.5.28", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.5.28.tgz", + "integrity": "sha512-jap6EiB3wG1YE1hyhNr9KLPpH4PGm+5tVMfN0l7fgKtV0ikgpcEN/YF94tru+z5m2HovqYW009+Evq9dcVGmpg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==" + }, + "node_modules/@swc/helpers": { + "version": "0.5.11", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.11.tgz", + "integrity": "sha512-YNlnKRWF2sVojTpIyzwou9XoTNbzbzONwRhOoniEioF1AtaitTvVZblaQRrAzChWQ1bLYyYSWzM18y4WwgzJ+A==", + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@swc/types": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.8.tgz", + "integrity": "sha512-RNFA3+7OJFNYY78x0FYwi1Ow+iF1eF5WvmfY1nXPOEH4R2p/D4Cr1vzje7dNAI2aLFqpv8Wyz4oKSWqIZArpQA==", + "dependencies": { + "@swc/counter": "^0.1.3" + } + }, + "node_modules/@tanstack/match-sorter-utils": { + "version": "8.15.1", + "resolved": "https://registry.npmjs.org/@tanstack/match-sorter-utils/-/match-sorter-utils-8.15.1.tgz", + "integrity": "sha512-PnVV3d2poenUM31ZbZi/yXkBu3J7kd5k2u51CGwwNojag451AjTH9N6n41yjXz2fpLeewleyLBmNS6+HcGDlXw==", + "dependencies": { + "remove-accents": "0.5.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/query-core": { + "version": "4.36.1", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-4.36.1.tgz", + "integrity": "sha512-DJSilV5+ytBP1FbFcEJovv4rnnm/CokuVvrBEtW/Va9DvuJ3HksbXUJEpI0aV1KtuL4ZoO9AVE6PyNLzF7tLeA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/react-query": { + 
"version": "4.36.1", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-4.36.1.tgz", + "integrity": "sha512-y7ySVHFyyQblPl3J3eQBWpXZkliroki3ARnBKsdJchlgt7yJLRDUcf4B8soufgiYt3pEQIkBWBx1N9/ZPIeUWw==", + "license": "MIT", + "dependencies": { + "@tanstack/query-core": "4.36.1", + "use-sync-external-store": "^1.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-native": "*" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + } + } + }, + "node_modules/@tanstack/react-table": { + "version": "8.17.3", + "resolved": "https://registry.npmjs.org/@tanstack/react-table/-/react-table-8.17.3.tgz", + "integrity": "sha512-5gwg5SvPD3lNAXPuJJz1fOCEZYk9/GeBFH3w/hCgnfyszOIzwkwgp5I7Q4MJtn0WECp84b5STQUDdmvGi8m3nA==", + "dependencies": { + "@tanstack/table-core": "8.17.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/@tanstack/table-core": { + "version": "8.17.3", + "resolved": "https://registry.npmjs.org/@tanstack/table-core/-/table-core-8.17.3.tgz", + "integrity": "sha512-mPBodDGVL+fl6d90wUREepHa/7lhsghg2A3vFpakEhrhtbIlgNAZiMr7ccTgak5qbHqF14Fwy+W1yFWQt+WmYQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/cookie": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.1.tgz", + "integrity": "sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==", + "dev": true + }, + "node_modules/@types/d3": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz", + "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==", + "dependencies": { + "@types/d3-array": "*", + "@types/d3-axis": "*", + "@types/d3-brush": "*", + "@types/d3-chord": "*", + "@types/d3-color": "*", + "@types/d3-contour": "*", + "@types/d3-delaunay": "*", + "@types/d3-dispatch": "*", + "@types/d3-drag": "*", + "@types/d3-dsv": "*", + "@types/d3-ease": "*", + "@types/d3-fetch": "*", + "@types/d3-force": "*", + "@types/d3-format": "*", + "@types/d3-geo": "*", + "@types/d3-hierarchy": "*", + "@types/d3-interpolate": "*", + "@types/d3-path": "*", + "@types/d3-polygon": "*", + "@types/d3-quadtree": "*", + "@types/d3-random": "*", + "@types/d3-scale": "*", + "@types/d3-scale-chromatic": "*", + "@types/d3-selection": "*", + "@types/d3-shape": "*", + "@types/d3-time": "*", + "@types/d3-time-format": "*", + "@types/d3-timer": "*", + "@types/d3-transition": "*", + "@types/d3-zoom": "*" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", + "integrity": "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg==" + }, + 
"node_modules/@types/d3-axis": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz", + "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-brush": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz", + "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-chord": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz", + "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==" + }, + "node_modules/@types/d3-contour": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz", + "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==", + "dependencies": { + "@types/d3-array": "*", + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==" + }, + "node_modules/@types/d3-dispatch": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.6.tgz", + "integrity": "sha512-4fvZhzMeeuBJYZXRXrRIQnvUYfyXwYmLsdiN7XXmVNQKKw1cM8a5WdID0g1hVFZDqT9ZqZEY5pD44p24VS7iZQ==" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-dsv": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz", + "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==" + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==" + }, + "node_modules/@types/d3-fetch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz", + "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==", + "dependencies": { + "@types/d3-dsv": "*" + } + }, + "node_modules/@types/d3-force": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.9.tgz", + "integrity": "sha512-IKtvyFdb4Q0LWna6ymywQsEYjK/94SGhPrMfEr1TIc5OBeziTi+1jcCvttts8e0UWZIxpasjnQk9MNk/3iS+kA==" + }, + "node_modules/@types/d3-format": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz", + "integrity": 
"sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==" + }, + "node_modules/@types/d3-geo": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz", + "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==", + "dependencies": { + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-hierarchy": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz", + "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-P2dlU/q51fkOc/Gfl3Ul9kicV7l+ra934qBFXCFhrZMOL6du1TM0pm1ThYvENukyOn5h9v+yMJ9Fn5JK4QozrQ==" + }, + "node_modules/@types/d3-polygon": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz", + "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==" + }, + "node_modules/@types/d3-quadtree": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz", + "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==" + }, + "node_modules/@types/d3-random": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz", + "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.8.tgz", + "integrity": "sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ==", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.3.tgz", + "integrity": "sha512-laXM4+1o5ImZv3RpFAsTRn3TEkzqkytiOY0Dz0sq5cnd1dtNlk6sHLon4OvqaiJb28T0S/TdsBI3Sjsy+keJrw==" + }, + "node_modules/@types/d3-selection": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.10.tgz", + "integrity": "sha512-cuHoUgS/V3hLdjJOLTT691+G2QoqAjCVLmr4kJXR4ha56w1Zdu8UUQ5TxLRqudgNjwXeQxKMq4j+lyf9sWuslg==" + }, + "node_modules/@types/d3-shape": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.6.tgz", + "integrity": "sha512-5KKk5aKGu2I+O6SONMYSNflgiP0WfZIQvVUMan50wHsLG1G94JlxEVnCpQARfTtzytuY0p/9PXXZb3I7giofIA==", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.3.tgz", + "integrity": "sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw==" + }, + "node_modules/@types/d3-time-format": { + "version": "4.0.3", + "resolved": 
"https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz", + "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.8.tgz", + "integrity": "sha512-ew63aJfQ/ms7QQ4X7pk5NxQ9fZH/z+i24ZfJ6tJSfqxJMrYLiK01EAs2/Rtw/JreGUsS3pLPNV644qXFGnoZNQ==", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "dev": true, + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "dev": true + }, + "node_modules/@types/geojson": { + "version": "7946.0.14", + "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.14.tgz", + "integrity": "sha512-WCfD5Ht3ZesJUsONdhvm84dmzWOiOzOAqOncN0++w0lBw1o8OuDNJF2McvvCef/yBqb/HYRahp1BYtODFQ8bRg==" + }, + "node_modules/@types/hoist-non-react-statics": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.5.tgz", + "integrity": "sha512-SbcrWzkKBw2cdwRTwQAswfpB9g9LJWfjtUeW/jvNwbhC8cpmmNYVePa+ncbUe0rGTQ7G3Ff6mYUN2VMfLVr+Sg==", + "dependencies": { + "@types/react": "*", + "hoist-non-react-statics": "^3.3.0" + } + }, + "node_modules/@types/howler": { + "version": "2.2.11", + "resolved": "https://registry.npmjs.org/@types/howler/-/howler-2.2.11.tgz", + "integrity": "sha512-7aBoUL6RbSIrqKnpEgfa1wSNUBK06mn08siP2QI0zYk7MXfEJAaORc4tohamQYqCqVESoDyRWSdQn2BOKWj2Qw==", + "dev": true + }, + "node_modules/@types/js-levenshtein": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@types/js-levenshtein/-/js-levenshtein-1.1.3.tgz", + "integrity": "sha512-jd+Q+sD20Qfu9e2aEXogiO3vpOC1PYJOUdyN9gvs4Qrvkg4wF43L5OhqrPeokdv8TL0/mXoYfpkcoGZMNN2pkQ==", + "dev": true + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true + }, + "node_modules/@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", + "dev": true + }, + "node_modules/@types/lodash": { + "version": "4.17.5", + "resolved": 
"https://registry.npmjs.org/@types/lodash/-/lodash-4.17.5.tgz", + "integrity": "sha512-MBIOHVZqVqgfro1euRDWX7OO0fBVUUMrN6Pwm8LQsz8cWhEpihlvR70ENj3f40j58TNxZaWv2ndSkInykNBBJw==", + "dev": true + }, + "node_modules/@types/lodash.debounce": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/lodash.debounce/-/lodash.debounce-4.0.9.tgz", + "integrity": "sha512-Ma5JcgTREwpLRwMM+XwBR7DaWe96nC38uCBDFKZWbNKD+osjVzdpnUSwBcqCptrp16sSOLBAUb50Car5I0TCsQ==", + "dev": true, + "dependencies": { + "@types/lodash": "*" + } + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", + "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==", + "dev": true + }, + "node_modules/@types/node": { + "version": "18.19.34", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.34.tgz", + "integrity": "sha512-eXF4pfBNV5DAMKGbI02NnDtWrQ40hAN558/2vvS4gMpMIxaf6JmD7YjnZbq0Q9TDSSkKBamime8ewRoomHdt4g==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==" + }, + "node_modules/@types/prop-types": { + "version": "15.7.12", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", + "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==" + }, + "node_modules/@types/react": { + "version": "18.3.3", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.3.tgz", + "integrity": "sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-color": { + "version": "3.0.12", + "resolved": "https://registry.npmjs.org/@types/react-color/-/react-color-3.0.12.tgz", + "integrity": "sha512-pr3uKE3lSvf7GFo1Rn2K3QktiZQFFrSgSGJ/3iMvSOYWt2pPAJ97rVdVfhWxYJZ8prAEXzoP2XX//3qGSQgu7Q==", + "dev": true, + "dependencies": { + "@types/react": "*", + "@types/reactcss": "*" + } + }, + "node_modules/@types/react-datepicker": { + "version": "4.19.6", + "resolved": "https://registry.npmjs.org/@types/react-datepicker/-/react-datepicker-4.19.6.tgz", + "integrity": "sha512-uH5fzxt9eXxnc+hDCy/iRSFqU2+9lR/q2lAmaG4WILMai1o3IOdpcV+VSypzBFJLTEC2jrfeDXcdol0CJVMq4g==", + "dev": true, + "dependencies": { + "@popperjs/core": "^2.9.2", + "@types/react": "*", + "date-fns": "^2.0.1", + "react-popper": "^2.2.5" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.0", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz", + "integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/react-transition-group": { + "version": "4.4.10", + "resolved": "https://registry.npmjs.org/@types/react-transition-group/-/react-transition-group-4.4.10.tgz", + "integrity": "sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q==", + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/reactcss": { + "version": "1.2.12", + "resolved": 
"https://registry.npmjs.org/@types/reactcss/-/reactcss-1.2.12.tgz", + "integrity": "sha512-BrXUQ86/wbbFiZv8h/Q1/Q1XOsaHneYmCb/tHe9+M8XBAAUc2EHfdY0DY22ZZjVSaXr5ix7j+zsqO2eGZub8lQ==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/semver": { + "version": "7.5.8", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", + "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", + "dev": true + }, + "node_modules/@types/set-cookie-parser": { + "version": "2.4.9", + "resolved": "https://registry.npmjs.org/@types/set-cookie-parser/-/set-cookie-parser-2.4.9.tgz", + "integrity": "sha512-bCorlULvl0xTdjj4BPUHX4cqs9I+go2TfW/7Do1nnFYWS0CPP429Qr1AY42kiFhCwLpvAkWFr1XIBHd8j6/MCQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/use-sync-external-store": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.3.tgz", + "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" + }, + "node_modules/@types/uuid": { + "version": "9.0.8", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.8.tgz", + "integrity": "sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==", + "dev": true + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.32.1.tgz", + "integrity": "sha512-6u6Plg9nP/J1GRpe/vcjjabo6Uc5YQPAMxsgQyGC/I0RuukiG1wIe3+Vtg3IrSCVJDmqK3j8adrtzXSENRtFgg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.32.1", + "@typescript-eslint/type-utils": "8.32.1", + "@typescript-eslint/utils": "8.32.1", + "@typescript-eslint/visitor-keys": "8.32.1", + "graphemer": "^1.4.0", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.32.1.tgz", + "integrity": "sha512-7IsIaIDeZn7kffk7qXC3o6Z4UblZJKV3UBpkvRNpr5NSyLji7tvTcvmnMNYuYLyh26mN8W723xpo3i4MlD33vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/visitor-keys": "8.32.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.32.1.tgz", + "integrity": "sha512-YmybwXUJcgGqgAp6bEsgpPXEg6dcCyPyCSr0CAAueacR/CCBi25G3V8gGQ2kRzQRBNol7VQknxMs9HvVa9Rvfg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/typescript-estree": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.32.1.tgz", + "integrity": "sha512-Y3AP9EIfYwBb4kWGb+simvPaqQoT5oJuzzj9m0i6FCY6SPvlomY2Ei4UEMm7+FXtlNJbor80ximyslzaQF6xhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/visitor-keys": "8.32.1", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/utils": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.32.1.tgz", + "integrity": "sha512-DsSFNIgLSrc89gpq1LJB7Hm1YpuhK086DRDJSNrewcGvYloWW1vZLHBTIvarKZDcAORIy/uWNx8Gad+4oMpkSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.32.1", + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/typescript-estree": "8.32.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.32.1.tgz", + "integrity": "sha512-ar0tjQfObzhSaW3C3QNmTc5ofj0hDoNQ5XWrCy6zDyabdr0TWhCkClp+rywGNj/odAFBVzzJrK4tEq5M4Hmu4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "eslint-visitor-keys": "^4.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/eslint-visitor-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.4.tgz", + "integrity": 
"sha512-gJzzk+PQNznz8ysRrC0aOkBNVRBDtE1n53IqyqEf3PXrYwomFs5q4pGMizBMJF+ykh03insJ27hB8gSrD2Hn8A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/experimental-utils": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.62.0.tgz", + "integrity": "sha512-RTXpeB3eMkpoclG3ZHft6vG/Z30azNHuqY6wKPBHlVMZFuEvrtlEDe8gMqDb+SO+9hjC/pLekeSCryf9vMZlCw==", + "dev": true, + "dependencies": { + "@typescript-eslint/utils": "5.62.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.32.1.tgz", + "integrity": "sha512-LKMrmwCPoLhM45Z00O1ulb6jwyVr2kr3XJp+G+tSEZcbauNnScewcQwtJqXDhXeYPDEjZ8C1SjXm015CirEmGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/scope-manager": "8.32.1", + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/typescript-estree": "8.32.1", + "@typescript-eslint/visitor-keys": "8.32.1", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.32.1.tgz", + "integrity": "sha512-7IsIaIDeZn7kffk7qXC3o6Z4UblZJKV3UBpkvRNpr5NSyLji7tvTcvmnMNYuYLyh26mN8W723xpo3i4MlD33vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/visitor-keys": "8.32.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.32.1.tgz", + "integrity": "sha512-YmybwXUJcgGqgAp6bEsgpPXEg6dcCyPyCSr0CAAueacR/CCBi25G3V8gGQ2kRzQRBNol7VQknxMs9HvVa9Rvfg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || 
>=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.32.1.tgz", + "integrity": "sha512-Y3AP9EIfYwBb4kWGb+simvPaqQoT5oJuzzj9m0i6FCY6SPvlomY2Ei4UEMm7+FXtlNJbor80ximyslzaQF6xhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/visitor-keys": "8.32.1", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.32.1.tgz", + "integrity": "sha512-ar0tjQfObzhSaW3C3QNmTc5ofj0hDoNQ5XWrCy6zDyabdr0TWhCkClp+rywGNj/odAFBVzzJrK4tEq5M4Hmu4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "eslint-visitor-keys": "^4.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/eslint-visitor-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "5.62.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz", + "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.32.1.tgz", + "integrity": "sha512-mv9YpQGA8iIsl5KyUPi+FGLm7+bA4fgXaeRcFKRDRwDMu4iwrSHeDPipwueNXhdIIZltwCJv+NkxftECbIZWfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "8.32.1", + "@typescript-eslint/utils": "8.32.1", + "debug": "^4.3.4", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/scope-manager": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.32.1.tgz", + "integrity": "sha512-7IsIaIDeZn7kffk7qXC3o6Z4UblZJKV3UBpkvRNpr5NSyLji7tvTcvmnMNYuYLyh26mN8W723xpo3i4MlD33vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/visitor-keys": "8.32.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.32.1.tgz", + "integrity": "sha512-YmybwXUJcgGqgAp6bEsgpPXEg6dcCyPyCSr0CAAueacR/CCBi25G3V8gGQ2kRzQRBNol7VQknxMs9HvVa9Rvfg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.32.1.tgz", + "integrity": "sha512-Y3AP9EIfYwBb4kWGb+simvPaqQoT5oJuzzj9m0i6FCY6SPvlomY2Ei4UEMm7+FXtlNJbor80ximyslzaQF6xhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/visitor-keys": "8.32.1", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/utils": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.32.1.tgz", + "integrity": 
"sha512-DsSFNIgLSrc89gpq1LJB7Hm1YpuhK086DRDJSNrewcGvYloWW1vZLHBTIvarKZDcAORIy/uWNx8Gad+4oMpkSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.32.1", + "@typescript-eslint/types": "8.32.1", + "@typescript-eslint/typescript-estree": "8.32.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "8.32.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.32.1.tgz", + "integrity": "sha512-ar0tjQfObzhSaW3C3QNmTc5ofj0hDoNQ5XWrCy6zDyabdr0TWhCkClp+rywGNj/odAFBVzzJrK4tEq5M4Hmu4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.32.1", + "eslint-visitor-keys": "^4.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/eslint-visitor-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz", + "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + 
"version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz", + "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.62.0.tgz", + "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@types/json-schema": "^7.0.9", + "@types/semver": "^7.3.12", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "eslint-scope": "^5.1.1", + "semver": "^7.3.7" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz", + "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 
|| >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true + }, + "node_modules/@vitejs/plugin-react": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-3.1.0.tgz", + "integrity": "sha512-AfgcRL8ZBhAlc3BFdigClmTUMISmmzHn7sB2h9U1odvc5U/MjWXsAaz18b/WoppUTDBzxOJwo2VdClfUcItu9g==", + "dev": true, + "dependencies": { + "@babel/core": "^7.20.12", + "@babel/plugin-transform-react-jsx-self": "^7.18.6", + "@babel/plugin-transform-react-jsx-source": "^7.19.6", + "magic-string": "^0.27.0", + "react-refresh": "^0.14.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.1.0-beta.0" + } + }, + "node_modules/@xmldom/xmldom": { + "version": "0.8.10", + "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.10.tgz", + "integrity": "sha512-2WALfTl4xo2SkGCYRt6rDTFfk9R1czmBvUQy12gK2KuRKIpWEhcbbzy8EZXtz/jkRqHX8bFEc6FC1HjX4TUWYw==", + "dev": true, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@zxing/text-encoding": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@zxing/text-encoding/-/text-encoding-0.9.0.tgz", + "integrity": "sha512-U/4aVJ2mxI0aDNI8Uq0wEhMgY+u4CNtEb0om3+y3+niDAsoTCOB33UF0sxpzqzdqXLqmvc+vZyAt4O8pPdfkwA==", + "dev": true, + "optional": true + }, + "node_modules/abortcontroller-polyfill": { + "version": "1.7.5", + "resolved": "https://registry.npmjs.org/abortcontroller-polyfill/-/abortcontroller-polyfill-1.7.5.tgz", + "integrity": "sha512-JMJ5soJWP18htbbxJjG7bG6yuI6pRhgJ0scHHTfkUjf6wjP912xZWvM+A4sJK3gqd9E8fcPbDnOefbA9Th/FIQ==" + }, + "node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/aria-hidden": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.4.tgz", + "integrity": "sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/aria-query": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", + "integrity": "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "is-array-buffer": "^3.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-includes": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.8.tgz", + "integrity": "sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "is-string": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": 
"sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz", + "integrity": "sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz", + "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz", + "integrity": "sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", + "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz", + "integrity": "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-errors": "^1.3.0", + "get-intrinsic": 
"^1.2.6", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ast-types-flow": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", + "integrity": "sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==", + "dev": true + }, + "node_modules/async-function": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", + "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/axe-core": { + "version": "4.10.3", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.3.tgz", + "integrity": "sha512-Xm7bpRXnDSX2YE2YFfBk2FnF0ep6tmG7xPh8iHee8MIcrgq762Nkce856dYtJYLkuIoYZvGfTs/PbZhideTcEg==", + "dev": true, + "license": "MPL-2.0", + "engines": { + "node": ">=4" + } + }, + "node_modules/axios": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.2.tgz", + "integrity": "sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/axobject-query": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/babel-plugin-macros": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz", + "integrity": "sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==", + "dependencies": { + "@babel/runtime": "^7.12.5", + "cosmiconfig": "^7.0.0", + "resolve": "^1.19.0" + }, + "engines": { + "node": ">=10", + "npm": ">=6" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.11", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz", + "integrity": "sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.6.2", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.10.4", + "resolved": 
"https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.4.tgz", + "integrity": "sha512-25J6I8NGfa5YkCDogHRID3fVCadIR8/pGl1/spvCkzb6lVn6SR3ojpx9nOn9iEBcUsjY24AmdKm5khcfKdylcg==", + "dev": true, + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.1", + "core-js-compat": "^3.36.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz", + "integrity": "sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==", + "dev": true, + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-transform-react-remove-prop-types": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-react-remove-prop-types/-/babel-plugin-transform-react-remove-prop-types-0.4.24.tgz", + "integrity": "sha512-eqj0hVcJUR57/Ug2zE1Yswsw4LhuqqHhD+8v120T1cl3kjg76QwtyBrdIk4WVwK+lAhBJVYCd/v+4nc4y+8JsA==", + "dev": true + }, + "node_modules/babel-preset-react-app": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/babel-preset-react-app/-/babel-preset-react-app-10.0.1.tgz", + "integrity": "sha512-b0D9IZ1WhhCWkrTXyFuIIgqGzSkRIH5D5AmB0bXbzYAB1OBAwHcUeyWW2LorutLWF5btNo/N7r/cIdmvvKJlYg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.16.0", + "@babel/plugin-proposal-class-properties": "^7.16.0", + "@babel/plugin-proposal-decorators": "^7.16.4", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.0", + "@babel/plugin-proposal-numeric-separator": "^7.16.0", + "@babel/plugin-proposal-optional-chaining": "^7.16.0", + "@babel/plugin-proposal-private-methods": "^7.16.0", + "@babel/plugin-transform-flow-strip-types": "^7.16.0", + "@babel/plugin-transform-react-display-name": "^7.16.0", + "@babel/plugin-transform-runtime": "^7.16.4", + "@babel/preset-env": "^7.16.4", + "@babel/preset-react": "^7.16.0", + "@babel/preset-typescript": "^7.16.0", + "@babel/runtime": "^7.16.3", + "babel-plugin-macros": "^3.1.0", + "babel-plugin-transform-react-remove-prop-types": "^0.4.24" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/base-x": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/base-x/-/base-x-3.0.9.tgz", + "integrity": "sha512-H7JU6iBHTal1gp56aKoaa//YUxEaAOUiydvrV/pILqIHXTtqxSkATOnDA2u+jZ/61sD+L/412+7kzXRtWukhpQ==", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": 
"https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.23.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.1.tgz", + "integrity": "sha512-TUfofFo/KsK/bWZ9TWQ5O26tsWW4Uhmt8IYklbnUa70udB6P2wA7w7o4PY4muaEPBQaAX+CEnmmIA41NVHtPVw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001629", + "electron-to-chromium": "^1.4.796", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.16" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001632", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001632.tgz", + "integrity": "sha512-udx3o7yHJfUxMLkGohMlVHCvFvWmirKh9JAH/d7WOLPetlH+LTL5cocMZ0t7oZx/mdlOWXti97xLZWc8uURRHg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", + "dev": true + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/classcat": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz", + "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==" + }, + "node_modules/classnames": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", + "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==" + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-width": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz", + "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", + "dev": true, + "engines": { + "node": ">= 10" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/cliui/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/cliui/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + 
"node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/clone": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", + "integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/clsx": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", + "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/compute-scroll-into-view": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-2.0.4.tgz", + "integrity": "sha512-y/ZA3BGnxoM/QHHQ2Uy49CLtnWPbt4tTPpEEZiEmmiWBFKjej7nEyH8Ryz54jH0MLXflUYA3Er2zUxPSJu5R+g==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/confusing-browser-globals": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.11.tgz", + "integrity": "sha512-JsPKdmh8ZkmnHxDk55FZ1TqVLvEQTvoByJZRN9jzI0UjxK/QgAmsphz7PGtqgPieQZ/CQcHWXCR7ATDNhGe+YA==", + "dev": true + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/cookie": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz", + "integrity": 
"sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/core-js-compat": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.37.1.tgz", + "integrity": "sha512-9TNiImhKvQqSUkOvk/mMRZzOANTiEVC7WaBNhHcKM7x+/5E1l5NvsysR19zuDQScE8k+kfQXWRN3AtS/eOSHpg==", + "dev": true, + "dependencies": { + "browserslist": "^4.23.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", + "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "dependencies": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/css-tree/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/csso": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", + "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "dependencies": { + "css-tree": "^1.1.2" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/damerau-levenshtein": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", + "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", + "dev": true + }, + "node_modules/data-view-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz", + "integrity": "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz", + "integrity": "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/inspect-js" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz", + "integrity": "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/date-fns": { + "version": "2.30.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz", + "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==", + "dependencies": { + "@babel/runtime": "^7.21.0" + }, + "engines": { + "node": ">=0.11" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/date-fns" + } + }, + "node_modules/debug": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.5.tgz", + "integrity": "sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/deepmerge": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz", + "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dev": true, + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/defaults/node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "dev": true, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": 
"sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/detect-libc": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", + "integrity": "sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==", + "bin": { + "detect-libc": "bin/detect-libc.js" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dnd-core": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/dnd-core/-/dnd-core-16.0.1.tgz", + "integrity": "sha512-HK294sl7tbw6F6IeuK16YSBUoorvHpY8RHO+9yFfaJyCDVb6n7PRcezrOEOa2SBCqiYpemh5Jx20ZcjKdFAVng==", + "dependencies": { + "@react-dnd/asap": "^5.0.1", + "@react-dnd/invariant": "^4.0.1", + "redux": "^4.2.0" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dom-helpers": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", + "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==", + "dependencies": { + "@babel/runtime": "^7.8.7", + "csstype": "^3.0.2" + } + }, + "node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": 
"https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/dom-serializer/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dotenv": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-7.0.0.tgz", + "integrity": "sha512-M3NhsLbV1i6HuGzBUH8vXrtxOk+tWmzWKDMbAVSUp3Zsjm7ywFeuwrUXhmhQyRK1q5B5GGy7hcXPbj3bnfZg2g==", + "engines": { + "node": ">=6" + } + }, + "node_modules/dotenv-expand": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-5.1.0.tgz", + "integrity": "sha512-YXQl1DSa4/PQyRfgrv6aoNjhasp/p4qs9FjJ4q4cQk+8m4r6k4ZSiEyytKG8f8W9gi8WsQtIObNmKd+tMzNTmA==" + }, + "node_modules/downshift": { + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.2.tgz", + "integrity": "sha512-iOv+E1Hyt3JDdL9yYcOgW7nZ7GQ2Uz6YbggwXvKUSleetYhU2nXD482Rz6CzvM4lvI1At34BYruKAL4swRGxaA==", + "dependencies": { + "@babel/runtime": "^7.14.8", + "compute-scroll-into-view": "^2.0.4", + "prop-types": "^15.7.2", + "react-is": "^17.0.2", + "tslib": "^2.3.0" + }, + "peerDependencies": { + "react": ">=16.12.0" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.799", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.799.tgz", + "integrity": "sha512-3D3DwWkRTzrdEpntY0hMLYwj7SeBk1138CkPE8sBDSj3WzrzOiG2rHm3luw8jucpf+WiyLBCZyU9lMHyQI9M9Q==" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": 
"sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true + }, + "node_modules/entities": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-3.0.1.tgz", + "integrity": "sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-abstract": { + "version": "1.23.9", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.9.tgz", + "integrity": "sha512-py07lI0wjxAC/DcfK1S6G7iANonniZwTISvdPzk9hzeH0IZIshbuuFxLIU96OyF89Yb9hiqWn8M/bY83KY5vzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.2", + "arraybuffer.prototype.slice": "^1.0.4", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "data-view-buffer": "^1.0.2", + "data-view-byte-length": "^1.0.2", + "data-view-byte-offset": "^1.0.1", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-set-tostringtag": "^2.1.0", + "es-to-primitive": "^1.3.0", + "function.prototype.name": "^1.1.8", + "get-intrinsic": "^1.2.7", + "get-proto": "^1.0.0", + "get-symbol-description": "^1.1.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "internal-slot": "^1.1.0", + "is-array-buffer": "^3.0.5", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.2", + "is-regex": "^1.2.1", + "is-shared-array-buffer": "^1.0.4", + "is-string": "^1.1.1", + "is-typed-array": "^1.1.15", + "is-weakref": "^1.1.0", + "math-intrinsics": "^1.1.0", + "object-inspect": "^1.13.3", + "object-keys": "^1.1.1", + "object.assign": "^4.1.7", + "own-keys": "^1.0.1", + "regexp.prototype.flags": "^1.5.3", + "safe-array-concat": "^1.1.3", + "safe-push-apply": "^1.0.0", + "safe-regex-test": "^1.1.0", + "set-proto": "^1.0.0", + "string.prototype.trim": "^1.2.10", + "string.prototype.trimend": "^1.0.9", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.3", + "typed-array-byte-length": "^1.0.3", + "typed-array-byte-offset": "^1.0.4", + "typed-array-length": "^1.0.7", + "unbox-primitive": "^1.1.0", + "which-typed-array": "^1.1.18" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": 
"https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.1.tgz", + "integrity": "sha512-uDn+FE1yrDzyC0pCo961B2IHbdM8y/ACZsKD4dG6WqrjV53BADjwa7D+1aom2rsNVfLyDgU/eigvlJGJ08OQ4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.6", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.0.3", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.6", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "iterator.prototype": "^1.1.4", + "safe-array-concat": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-shim-unscopables": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz", + "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==", + "dev": true, + "dependencies": { + "hasown": "^2.0.0" + } + }, + "node_modules/es-to-primitive": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz", + "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-symbol": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/esbuild": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", + "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.19.12", + "@esbuild/android-arm": "0.19.12", + "@esbuild/android-arm64": "0.19.12", + "@esbuild/android-x64": "0.19.12", + "@esbuild/darwin-arm64": "0.19.12", + "@esbuild/darwin-x64": "0.19.12", + "@esbuild/freebsd-arm64": "0.19.12", + "@esbuild/freebsd-x64": "0.19.12", + "@esbuild/linux-arm": "0.19.12", + "@esbuild/linux-arm64": "0.19.12", + "@esbuild/linux-ia32": 
"0.19.12", + "@esbuild/linux-loong64": "0.19.12", + "@esbuild/linux-mips64el": "0.19.12", + "@esbuild/linux-ppc64": "0.19.12", + "@esbuild/linux-riscv64": "0.19.12", + "@esbuild/linux-s390x": "0.19.12", + "@esbuild/linux-x64": "0.19.12", + "@esbuild/netbsd-x64": "0.19.12", + "@esbuild/openbsd-x64": "0.19.12", + "@esbuild/sunos-x64": "0.19.12", + "@esbuild/win32-arm64": "0.19.12", + "@esbuild/win32-ia32": "0.19.12", + "@esbuild/win32-x64": "0.19.12" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-config-react-app": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/eslint-config-react-app/-/eslint-config-react-app-7.0.1.tgz", + "integrity": "sha512-K6rNzvkIeHaTd8m/QEh1Zko0KI7BACWkkneSs6s9cKZC/J27X3eZR6Upt1jkmZ/4FK+XUOPPxMEN7+lbUXfSlA==", + "dev": true, + "dependencies": { + "@babel/core": "^7.16.0", + "@babel/eslint-parser": "^7.16.3", + "@rushstack/eslint-patch": "^1.1.0", + "@typescript-eslint/eslint-plugin": "^5.5.0", + "@typescript-eslint/parser": "^5.5.0", + "babel-preset-react-app": "^10.0.1", + "confusing-browser-globals": "^1.0.11", + "eslint-plugin-flowtype": "^8.0.3", + "eslint-plugin-import": "^2.25.3", + "eslint-plugin-jest": "^25.3.0", + "eslint-plugin-jsx-a11y": "^6.5.1", + "eslint-plugin-react": "^7.27.1", + 
"eslint-plugin-react-hooks": "^4.3.0", + "eslint-plugin-testing-library": "^5.0.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "eslint": "^8.0.0" + } + }, + "node_modules/eslint-config-react-app/node_modules/@typescript-eslint/eslint-plugin": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.62.0.tgz", + "integrity": "sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.4.0", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/type-utils": "5.62.0", + "@typescript-eslint/utils": "5.62.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "natural-compare-lite": "^1.4.0", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^5.0.0", + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/eslint-config-react-app/node_modules/@typescript-eslint/parser": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz", + "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/eslint-config-react-app/node_modules/@typescript-eslint/type-utils": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.62.0.tgz", + "integrity": "sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "5.62.0", + "@typescript-eslint/utils": "5.62.0", + "debug": "^4.3.4", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/eslint-config-react-app/node_modules/eslint-plugin-react-hooks": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", + "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-config-react-app/node_modules/semver": { + "version": "7.7.2", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/eslint-import-resolver-node": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", + "dev": true, + "dependencies": { + "debug": "^3.2.7", + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" + } + }, + "node_modules/eslint-import-resolver-node/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-module-utils": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.12.0.tgz", + "integrity": "sha512-wALZ0HFoytlyh/1+4wuZ9FJCD/leWHQzzrxJ8+rebyReSLk7LApMyd3WJaLVoN+D5+WIdJyDK1c6JnE65V4Zyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^3.2.7" + }, + "engines": { + "node": ">=4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-flowtype": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/eslint-plugin-flowtype/-/eslint-plugin-flowtype-8.0.3.tgz", + "integrity": "sha512-dX8l6qUL6O+fYPtpNRideCFSpmWOUVx5QcaGLVqe/vlDiBSe4vYljDWDETwnyFzpl7By/WVIu6rcrniCgH9BqQ==", + "dev": true, + "dependencies": { + "lodash": "^4.17.21", + "string-natural-compare": "^3.0.1" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@babel/plugin-syntax-flow": "^7.14.5", + "@babel/plugin-transform-react-jsx": "^7.14.9", + "eslint": "^8.1.0" + } + }, + "node_modules/eslint-plugin-import": { + "version": "2.31.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.31.0.tgz", + "integrity": "sha512-ixmkI62Rbc2/w8Vfxyh1jQRTdRTF52VxwRVHl/ykPAmqG+Nb7/kNn+byLP0LxPgI7zWA16Jt82SybJInmMia3A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rtsao/scc": "^1.1.0", + "array-includes": "^3.1.8", + "array.prototype.findlastindex": "^1.2.5", + "array.prototype.flat": "^1.3.2", + "array.prototype.flatmap": "^1.3.2", + "debug": "^3.2.7", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.9", + "eslint-module-utils": "^2.12.0", + "hasown": "^2.0.2", + "is-core-module": "^2.15.1", + "is-glob": "^4.0.3", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.8", + "object.groupby": "^1.0.3", + "object.values": "^1.2.0", + "semver": "^6.3.1", + "string.prototype.trimend": "^1.0.8", + "tsconfig-paths": "^3.15.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 || ^9" + } + }, + "node_modules/eslint-plugin-import/node_modules/debug": { + "version": "3.2.7", + 
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-jest": { + "version": "25.7.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-jest/-/eslint-plugin-jest-25.7.0.tgz", + "integrity": "sha512-PWLUEXeeF7C9QGKqvdSbzLOiLTx+bno7/HC9eefePfEb257QFHg7ye3dh80AZVkaa/RQsBB1Q/ORQvg2X7F0NQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/experimental-utils": "^5.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + }, + "peerDependencies": { + "@typescript-eslint/eslint-plugin": "^4.0.0 || ^5.0.0", + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "@typescript-eslint/eslint-plugin": { + "optional": true + }, + "jest": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-jsx-a11y": { + "version": "6.10.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.2.tgz", + "integrity": "sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "aria-query": "^5.3.2", + "array-includes": "^3.1.8", + "array.prototype.flatmap": "^1.3.2", + "ast-types-flow": "^0.0.8", + "axe-core": "^4.10.0", + "axobject-query": "^4.1.0", + "damerau-levenshtein": "^1.0.8", + "emoji-regex": "^9.2.2", + "hasown": "^2.0.2", + "jsx-ast-utils": "^3.3.5", + "language-tags": "^1.0.9", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.8", + "safe-regex-test": "^1.0.3", + "string.prototype.includes": "^2.0.1" + }, + "engines": { + "node": ">=4.0" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9" + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.37.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.5.tgz", + "integrity": "sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.8", + "array.prototype.findlast": "^1.2.5", + "array.prototype.flatmap": "^1.3.3", + "array.prototype.tosorted": "^1.1.4", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.2.1", + "estraverse": "^5.3.0", + "hasown": "^2.0.2", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.9", + "object.fromentries": "^2.0.8", + "object.values": "^1.2.1", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.12", + "string.prototype.repeat": "^1.0.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.2.0.tgz", + "integrity": "sha512-+f15FfK64YQwZdJNELETdn5ibXEUQmW1DZL6KXhNnc2heoy/sg9VJJeT7n8TlMWouzWqSWavFkIhHyIbIAEapg==", + 
"dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" + } + }, + "node_modules/eslint-plugin-react/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-react/node_modules/resolve": { + "version": "2.0.0-next.5", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", + "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", + "dev": true, + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/eslint-plugin-testing-library": { + "version": "5.11.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-testing-library/-/eslint-plugin-testing-library-5.11.1.tgz", + "integrity": "sha512-5eX9e1Kc2PqVRed3taaLnAAqPZGEX75C+M/rXzUAI3wIg/ZxzUm1OVAwfe/O+vE+6YXOLetSe9g5GKD2ecXipw==", + "dev": true, + "dependencies": { + "@typescript-eslint/utils": "^5.58.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0", + "npm": ">=6" + }, + "peerDependencies": { + "eslint": "^7.5.0 || ^8.0.0" + } + }, + "node_modules/eslint-plugin-typescript": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-typescript/-/eslint-plugin-typescript-0.14.0.tgz", + "integrity": "sha512-2u1WnnDF2mkWWgU1lFQ2RjypUlmRoBEvQN02y9u+IL12mjWlkKFGEBnVsjs9Y8190bfPQCvWly1c2rYYUSOxWw==", + "deprecated": "Deprecated: Use @typescript-eslint/eslint-plugin instead", + "dev": true, + "dependencies": { + "requireindex": "~1.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/eslint/node_modules/chalk": { + "version": "4.1.2", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/eslint/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/eslint/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/eslint/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": 
"https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "dev": true + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "dev": true, + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/exenv": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/exenv/-/exenv-1.2.2.tgz", + "integrity": "sha512-Z+ktTxTwv9ILfgKCk32OX3n/doe+OcLTRtqK9pcL+JsP3J1/VW8Uvl4ZjLlKqeW4rzK4oesDOGMEMRIZqtP4Iw==" + }, + "node_modules/external-editor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", + "dev": true, + "dependencies": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + 
"node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-root": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", + "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==" + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", + "dev": true + }, + "node_modules/follow-redirects": { + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": 
"sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/formik": { + "version": "2.4.6", + "resolved": "https://registry.npmjs.org/formik/-/formik-2.4.6.tgz", + "integrity": "sha512-A+2EI7U7aG296q2TLGvNapDNTZp1khVt5Vk0Q/fyfSROss0V/V6+txt2aJnwEos44IxTCW/LYAi/zgWzlevj+g==", + "funding": [ + { + "type": "individual", + "url": "https://opencollective.com/formik" + } + ], + "dependencies": { + "@types/hoist-non-react-statics": "^3.3.1", + "deepmerge": "^2.1.1", + "hoist-non-react-statics": "^3.3.0", + "lodash": "^4.17.21", + "lodash-es": "^4.17.21", + "react-fast-compare": "^2.0.1", + "tiny-warning": "^1.0.2", + "tslib": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/formik/node_modules/react-fast-compare": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-2.0.4.tgz", + "integrity": "sha512-suNP+J1VU1MWFKcyt7RtjiSWUjvidmQSlqu+eHslq+342xCbGTYmC0mEhPCOHxlW0CywylOC1u2DFAT+bv4dBw==" + }, + "node_modules/framer-motion": { + "version": "8.5.5", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-8.5.5.tgz", + "integrity": "sha512-5IDx5bxkjWHWUF3CVJoSyUVOtrbAxtzYBBowRE2uYI/6VYhkEBD+rbTHEGuUmbGHRj6YqqSfoG7Aa1cLyWCrBA==", + "dependencies": { + "@motionone/dom": "^10.15.3", + "hey-listen": "^1.0.8", + "tslib": "^2.4.0" + }, + "optionalDependencies": { + "@emotion/is-prop-valid": "^0.8.2" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/function.prototype.name": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz", + "integrity": "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "functions-have-names": "^1.2.3", + "hasown": "^2.0.2", + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", + "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/get-port": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/get-port/-/get-port-4.2.0.tgz", + "integrity": "sha512-/b3jarXkH8KJoOMQc3uVGHASwGLPq3gSFJ7tgJm2diza+bydJPTGOibin2steecKeOylE8oY2JERlVWkAJO6yw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-symbol-description": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz", + "integrity": 
"sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/globalthis": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", + "dev": true, + "dependencies": { + "define-properties": "^1.2.1", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globrex": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/globrex/-/globrex-0.1.2.tgz", + "integrity": "sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==", + "dev": true + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/graphql": { + "version": "16.8.2", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.2.tgz", 
+ "integrity": "sha512-cvVIBILwuoSyD54U4cF/UXDh5yAobhNV/tPygI4lZhgOIJQE/WLWC4waBRb4I6bDVYb3OVx3lfHbaQOEoUD5sg==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, + "node_modules/has-bigints": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", + "integrity": "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz", + "integrity": "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/headers-polyfill": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-3.3.0.tgz", + "integrity": "sha512-5e57etwBpNcDc0b6KCVWEh/Ro063OxPvzVimUdM0/tsYM/T7Hfy3kknIGj78SFTOhNd8AZY41U8mOHoO4LzmIQ==", + "dev": true + }, + "node_modules/hey-listen": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/hey-listen/-/hey-listen-1.0.8.tgz", + "integrity": "sha512-COpmrF2NOg4TBWUJ5UVyaCU2A88wEMkUPK4hNqyCkqHbxT92BbvfjoSozkAIIm6XhicGlJHhFdullInrdhwU8Q==" + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": 
"https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hoist-non-react-statics/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/howler": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/howler/-/howler-2.2.4.tgz", + "integrity": "sha512-iARIBPgcQrwtEr+tALF+rapJ8qSc+Set2GJQl7xT1MQzWaVkFebdJhR3alVlSiUf5U7nAANKuj3aWpwerocD5w==" + }, + "node_modules/html-parse-stringify": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-parse-stringify/-/html-parse-stringify-3.0.1.tgz", + "integrity": "sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==", + "dependencies": { + "void-elements": "3.1.0" + } + }, + "node_modules/htmlnano": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/htmlnano/-/htmlnano-2.1.1.tgz", + "integrity": "sha512-kAERyg/LuNZYmdqgCdYvugyLWNFAm8MWXpQMz1pLpetmCbFwoMxvkSoaAMlFrOC4OKTWI4KlZGT/RsNxg4ghOw==", + "dependencies": { + "cosmiconfig": "^9.0.0", + "posthtml": "^0.16.5", + "timsort": "^0.3.0" + }, + "peerDependencies": { + "cssnano": "^7.0.0", + "postcss": "^8.3.11", + "purgecss": "^6.0.0", + "relateurl": "^0.2.7", + "srcset": "5.0.1", + "svgo": "^3.0.2", + "terser": "^5.10.0", + "uncss": "^0.17.3" + }, + "peerDependenciesMeta": { + "cssnano": { + "optional": true + }, + "postcss": { + "optional": true + }, + "purgecss": { + "optional": true + }, + "relateurl": { + "optional": true + }, + "srcset": { + "optional": true + }, + "svgo": { + "optional": true + }, + "terser": { + "optional": true + }, + "uncss": { + "optional": true + } + } + }, + "node_modules/htmlnano/node_modules/cosmiconfig": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-9.0.0.tgz", + "integrity": "sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==", + "dependencies": { + "env-paths": "^2.2.1", + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/htmlparser2": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-7.2.0.tgz", + "integrity": "sha512-H7MImA4MS6cw7nbyURtLPO1Tms7C5H602LRETv95z1MxO/7CP7rDVROehUYeYBUYEON94NXXDEPmZuq+hX4sog==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.2", + "domutils": "^2.8.0", + "entities": "^3.0.1" + } + }, + "node_modules/i18next": { + "version": "22.5.1", + "resolved": "https://registry.npmjs.org/i18next/-/i18next-22.5.1.tgz", + "integrity": "sha512-8TGPgM3pAD+VRsMtUMNknRz3kzqwp/gPALrWMsDnmC1mKqJwpWyooQRLMcbTwq8z8YwSmuj+ZYvc+xCuEpkssA==", + "funding": [ + { + "type": "individual", + "url": "https://locize.com" + }, + { + "type": "individual", + "url": "https://locize.com/i18next.html" + }, + { 
+ "type": "individual", + "url": "https://www.i18next.com/how-to/faq#i18next-is-awesome.-how-can-i-support-the-project" + } + ], + "dependencies": { + "@babel/runtime": "^7.20.6" + } + }, + "node_modules/i18next-browser-languagedetector": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/i18next-browser-languagedetector/-/i18next-browser-languagedetector-7.2.1.tgz", + "integrity": "sha512-h/pM34bcH6tbz8WgGXcmWauNpQupCGr25XPp9cZwZInR9XHSjIFDYp1SIok7zSPsTOMxdvuLyu86V+g2Kycnfw==", + "dependencies": { + "@babel/runtime": "^7.23.2" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dev": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/immutable": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.6.tgz", + "integrity": "sha512-Ju0+lEMyzMVZarkTn/gqRpdqd5dOPaz1mCZ0SH3JV6iFw81PldE/PEB1hWVEA288HPt4WXW8O7AWxB10M+03QQ==", + "dev": true + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/inquirer": { + "version": "8.2.6", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.6.tgz", + "integrity": "sha512-M1WuAmb7pn9zdFRtQYk26ZBoY043Sse0wVDdk4Bppr+JOXyQYybdtvK+l9wUibhtjdjvtoiNy8tk+EgsYIUqKg==", + "dev": true, + "dependencies": { + "ansi-escapes": "^4.2.1", + "chalk": "^4.1.1", + "cli-cursor": "^3.1.0", + "cli-width": "^3.0.0", + "external-editor": "^3.0.3", + "figures": "^3.0.0", + "lodash": "^4.17.21", + "mute-stream": "0.0.8", + "ora": "^5.4.1", + "run-async": "^2.4.0", + "rxjs": "^7.5.5", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + "through": "^2.3.6", + "wrap-ansi": "^6.0.1" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/inquirer/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/inquirer/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/inquirer/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/inquirer/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/inquirer/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/inquirer/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/internal-slot": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", + "integrity": "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "hasown": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/is-arguments": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", + "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz", + "integrity": "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-async-function": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", + "integrity": "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "async-function": "^1.0.0", + "call-bound": "^1.0.3", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz", + "integrity": "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-boolean-object": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz", + "integrity": "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==", + "dev": true, + "license": 
"MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz", + "integrity": "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz", + "integrity": "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz", + "integrity": "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dev": true, + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + 
}, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-json": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-json/-/is-json-2.0.1.tgz", + "integrity": "sha512-6BEnpVn1rcf3ngfmViLM6vjUjGErbdrL4rwlv+u1NO1XO8kqT4YGL8+19Q+Z/bas8tY90BTWMk2+fW1g6hQjbA==" + }, + "node_modules/is-map": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-node-process": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-node-process/-/is-node-process-1.2.0.tgz", + "integrity": "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==", + "dev": true + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz", + "integrity": "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { 
+ "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz", + "integrity": "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-string": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz", + "integrity": "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz", + "integrity": "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-symbols": "^1.1.0", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz", + "integrity": "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz", + "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/iterator.prototype": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.5.tgz", + "integrity": "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "get-proto": "^1.0.0", + "has-symbols": "^1.1.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/js-levenshtein": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/js-levenshtein/-/js-levenshtein-1.1.6.tgz", + "integrity": "sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": 
"https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsx-ast-utils": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", + "dev": true, + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/language-subtag-registry": { + "version": "0.3.23", + "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz", + "integrity": "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==", + "dev": true + }, + "node_modules/language-tags": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz", + "integrity": "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==", + "dev": true, + "dependencies": { + "language-subtag-registry": "^0.3.20" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lightningcss": { + "version": "1.25.1", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.25.1.tgz", + "integrity": "sha512-V0RMVZzK1+rCHpymRv4URK2lNhIRyO8g7U7zOFwVAhJuat74HtkjIQpQRKNCwFEYkRGpafOpmXXLoaoBcyVtBg==", + "dependencies": { + "detect-libc": "^1.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-darwin-arm64": "1.25.1", + "lightningcss-darwin-x64": "1.25.1", + "lightningcss-freebsd-x64": "1.25.1", + "lightningcss-linux-arm-gnueabihf": "1.25.1", + "lightningcss-linux-arm64-gnu": "1.25.1", + "lightningcss-linux-arm64-musl": "1.25.1", + "lightningcss-linux-x64-gnu": "1.25.1", + "lightningcss-linux-x64-musl": "1.25.1", + "lightningcss-win32-x64-msvc": "1.25.1" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.25.1", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.25.1.tgz", + "integrity": "sha512-G4Dcvv85bs5NLENcu/s1f7ehzE3D5ThnlWSDwE190tWXRQCQaqwcuHe+MGSVI/slm0XrxnaayXY+cNl3cSricw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.25.1", + "resolved": 
"https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.25.1.tgz", + "integrity": "sha512-dYWuCzzfqRueDSmto6YU5SoGHvZTMU1Em9xvhcdROpmtOQLorurUZz8+xFxZ51lCO2LnYbfdjZ/gCqWEkwixNg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.25.1", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.25.1.tgz", + "integrity": "sha512-hXoy2s9A3KVNAIoKz+Fp6bNeY+h9c3tkcx1J3+pS48CqAt+5bI/R/YY4hxGL57fWAIquRjGKW50arltD6iRt/w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.25.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.25.1.tgz", + "integrity": "sha512-tWyMgHFlHlp1e5iW3EpqvH5MvsgoN7ZkylBbG2R2LWxnvH3FuWCJOhtGcYx9Ks0Kv0eZOBud789odkYLhyf1ng==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.25.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.25.1.tgz", + "integrity": "sha512-Xjxsx286OT9/XSnVLIsFEDyDipqe4BcLeB4pXQ/FEA5+2uWCCuAEarUNQumRucnj7k6ftkAHUEph5r821KBccQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.25.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.25.1.tgz", + "integrity": "sha512-IhxVFJoTW8wq6yLvxdPvyHv4NjzcpN1B7gjxrY3uaykQNXPHNIpChLB52+wfH+yS58zm1PL4LemUp8u9Cfp6Bw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.25.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.25.1.tgz", + "integrity": "sha512-RXIaru79KrREPEd6WLXfKfIp4QzoppZvD3x7vuTKkDA64PwTzKJ2jaC43RZHRt8BmyIkRRlmywNhTRMbmkPYpA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.25.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.25.1.tgz", + "integrity": "sha512-TdcNqFsAENEEFr8fJWg0Y4fZ/nwuqTRsIr7W7t2wmDUlA8eSXVepeeONYcb+gtTj1RaXn/WgNLB45SFkz+XBZA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.25.1", + "resolved": 
"https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.25.1.tgz", + "integrity": "sha512-9KZZkmmy9oGDSrnyHuxP6iMhbsgChUiu/NSgOx+U1I/wTngBStDf2i2aGRCHvFqj19HqqBEI4WuGVQBa2V6e0A==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "node_modules/linkify-react": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/linkify-react/-/linkify-react-4.1.3.tgz", + "integrity": "sha512-rhI3zM/fxn5BfRPHfi4r9N7zgac4vOIxub1wHIWXLA5ENTMs+BGaIaFO1D1PhmxgwhIKmJz3H7uCP0Dg5JwSlA==", + "peerDependencies": { + "linkifyjs": "^4.0.0", + "react": ">= 15.0.0" + } + }, + "node_modules/linkifyjs": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/linkifyjs/-/linkifyjs-4.1.3.tgz", + "integrity": "sha512-auMesunaJ8yfkHvK4gfg1K0SaKX/6Wn9g2Aac/NwX+l5VdmFZzo/hdPGxEOETj+ryRa4/fiOPjeeKURSAJx1sg==" + }, + "node_modules/lmdb": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/lmdb/-/lmdb-2.8.5.tgz", + "integrity": "sha512-9bMdFfc80S+vSldBmG3HOuLVHnxRdNTlpzR6QDnzqCQtCzGUEAGTzBKYMeIM+I/sU4oZfgbcbS7X7F65/z/oxQ==", + "hasInstallScript": true, + "dependencies": { + "msgpackr": "^1.9.5", + "node-addon-api": "^6.1.0", + "node-gyp-build-optional-packages": "5.1.1", + "ordered-binary": "^1.4.1", + "weak-lru-cache": "^1.2.2" + }, + "bin": { + "download-lmdb-prebuilds": "bin/download-prebuilds.js" + }, + "optionalDependencies": { + "@lmdb/lmdb-darwin-arm64": "2.8.5", + "@lmdb/lmdb-darwin-x64": "2.8.5", + "@lmdb/lmdb-linux-arm": "2.8.5", + "@lmdb/lmdb-linux-arm64": "2.8.5", + "@lmdb/lmdb-linux-x64": "2.8.5", + "@lmdb/lmdb-win32-x64": "2.8.5" + } + }, + "node_modules/lmdb/node_modules/node-addon-api": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-6.1.0.tgz", + "integrity": "sha512-+eawOlIgy680F0kBzPUNFhMZGtJ1YmqM6l4+Crf4IkImjYrO/mqPwRMh352g23uIaQKFItcQ64I7KMaJxHgAVA==" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + 
"resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/log-symbols/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/log-symbols/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/log-symbols/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/log-symbols/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/log-symbols/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": 
"sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/magic-string": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.27.0.tgz", + "integrity": "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==", + "dev": true, + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.13" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/material-colors": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/material-colors/-/material-colors-1.2.6.tgz", + "integrity": "sha512-6qE4B9deFBIa9YSpOc9O0Sgc43zTeVYbgDT5veRKSlB2+ZuHNoVVxA1L/ckMUayV9Ay9y7Z/SZCLcGteW9i7bg==" + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" + }, + "node_modules/memoize-one": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-6.0.0.tgz", + "integrity": "sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz", + "integrity": "sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + 
"node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mocksse": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mocksse/-/mocksse-1.0.4.tgz", + "integrity": "sha512-W5DR/wwmx/EZUgjN1g+pvlhvFFtRJ3CqGRKqsK/B1hTxrjMb/t3JCbk6aomJD4WomrnueqMaTAhcAkIZJYd73w==", + "dev": true + }, + "node_modules/moment": { + "version": "2.30.1", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.30.1.tgz", + "integrity": "sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==", + "engines": { + "node": "*" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/msgpackr": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.10.2.tgz", + "integrity": "sha512-L60rsPynBvNE+8BWipKKZ9jHcSGbtyJYIwjRq0VrIvQ08cRjntGXJYW/tmciZ2IHWIY8WEW32Qa2xbh5+SKBZA==", + "optionalDependencies": { + "msgpackr-extract": "^3.0.2" + } + }, + "node_modules/msgpackr-extract": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/msgpackr-extract/-/msgpackr-extract-3.0.3.tgz", + "integrity": "sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "node-gyp-build-optional-packages": "5.2.2" + }, + "bin": { + "download-msgpackr-prebuilds": "bin/download-prebuilds.js" + }, + "optionalDependencies": { + "@msgpackr-extract/msgpackr-extract-darwin-arm64": "3.0.3", + "@msgpackr-extract/msgpackr-extract-darwin-x64": "3.0.3", + "@msgpackr-extract/msgpackr-extract-linux-arm": "3.0.3", + "@msgpackr-extract/msgpackr-extract-linux-arm64": "3.0.3", + "@msgpackr-extract/msgpackr-extract-linux-x64": "3.0.3", + "@msgpackr-extract/msgpackr-extract-win32-x64": "3.0.3" + } + }, + "node_modules/msgpackr-extract/node_modules/detect-libc": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/msgpackr-extract/node_modules/node-gyp-build-optional-packages": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/node-gyp-build-optional-packages/-/node-gyp-build-optional-packages-5.2.2.tgz", + "integrity": "sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==", + "optional": true, + "dependencies": { + "detect-libc": "^2.0.1" + }, + "bin": { + "node-gyp-build-optional-packages": "bin.js", + "node-gyp-build-optional-packages-optional": "optional.js", + "node-gyp-build-optional-packages-test": "build-test.js" + } + }, + "node_modules/msw": { + "version": "0.49.3", + "resolved": "https://registry.npmjs.org/msw/-/msw-0.49.3.tgz", + "integrity": "sha512-kRCbDNbNnRq5LC1H/NUceZlrPAvSrMH6Or0mirIuH69NY84xwDruPn/hkXTovIK1KwDwbk+ZdoSyJlpiekLxEA==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "@mswjs/cookies": "^0.2.2", + "@mswjs/interceptors": "^0.17.5", + 
"@open-draft/until": "^1.0.3", + "@types/cookie": "^0.4.1", + "@types/js-levenshtein": "^1.1.1", + "chalk": "4.1.1", + "chokidar": "^3.4.2", + "cookie": "^0.4.2", + "graphql": "^15.0.0 || ^16.0.0", + "headers-polyfill": "^3.1.0", + "inquirer": "^8.2.0", + "is-node-process": "^1.0.1", + "js-levenshtein": "^1.1.6", + "node-fetch": "^2.6.7", + "outvariant": "^1.3.0", + "path-to-regexp": "^6.2.0", + "strict-event-emitter": "^0.4.3", + "type-fest": "^2.19.0", + "yargs": "^17.3.1" + }, + "bin": { + "msw": "cli/index.js" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mswjs" + }, + "peerDependencies": { + "typescript": ">= 4.4.x <= 4.9.x" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/msw/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/msw/node_modules/chalk": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", + "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/msw/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/msw/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/msw/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/msw/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/mute-stream": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", + "dev": true + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + 
"dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/natural-compare-lite": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", + "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-addon-api": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.0.tgz", + "integrity": "sha512-mNcltoe1R8o7STTegSOHdnJNN7s5EUvhoS7ShnTHDyOSd+8H+UdWODq6qSv67PjC8Zc5JRT8+oLAMCr0SIXw7g==", + "engines": { + "node": "^16 || ^18 || >= 20" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dev": true, + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-gyp-build-optional-packages": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/node-gyp-build-optional-packages/-/node-gyp-build-optional-packages-5.1.1.tgz", + "integrity": "sha512-+P72GAjVAbTxjjwUmwjVrqrdZROD4nf8KgpBoDxqXXTiYZZt/ud60dE5yvCSr9lRO8e8yv6kgJIC0K0PfZFVQw==", + "dependencies": { + "detect-libc": "^2.0.1" + }, + "bin": { + "node-gyp-build-optional-packages": "bin.js", + "node-gyp-build-optional-packages-optional": "optional.js", + "node-gyp-build-optional-packages-test": "build-test.js" + } + }, + "node_modules/node-gyp-build-optional-packages/node_modules/detect-libc": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/node-releases": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", + "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/nullthrows": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/nullthrows/-/nullthrows-1.1.1.tgz", + "integrity": "sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw==" + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.9.tgz", + "integrity": "sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.groupby": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.3.tgz", + "integrity": "sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.values": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.1.tgz", + "integrity": "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + 
"call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dev": true, + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/ora/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/ora/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/ora/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true 
+ }, + "node_modules/ora/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ora/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ordered-binary": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/ordered-binary/-/ordered-binary-1.5.1.tgz", + "integrity": "sha512-5VyHfHY3cd0iza71JepYG50My+YUbrFtGoUz2ooEydPyPM7Aai/JW098juLr+RG6+rDJuzNNTsEQu2DZa1A41A==" + }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/outvariant": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.2.tgz", + "integrity": "sha512-Ou3dJ6bA/UJ5GVHxah4LnqDwZRwAmWxrG3wtrHrbGnP4RnLCtA64A4F+ae7Y8ww660JaddSoArUR5HjipWSHAQ==", + "dev": true + }, + "node_modules/own-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", + "integrity": "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.6", + "object-keys": "^1.1.1", + "safe-push-apply": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parcel": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/parcel/-/parcel-2.12.0.tgz", + "integrity": "sha512-W+gxAq7aQ9dJIg/XLKGcRT0cvnStFAQHPaI0pvD0U2l6IVLueUAm3nwN7lkY62zZNmlvNx6jNtE4wlbS+CyqSg==", + "dependencies": { + "@parcel/config-default": "2.12.0", + "@parcel/core": "2.12.0", + "@parcel/diagnostic": "2.12.0", + "@parcel/events": "2.12.0", + "@parcel/fs": "2.12.0", + "@parcel/logger": "2.12.0", + "@parcel/package-manager": "2.12.0", + "@parcel/reporter-cli": "2.12.0", + "@parcel/reporter-dev-server": "2.12.0", + "@parcel/reporter-tracer": "2.12.0", + "@parcel/utils": "2.12.0", + "chalk": "^4.1.0", + "commander": "^7.0.0", + "get-port": "^4.2.0" + }, + "bin": { 
+ "parcel": "lib/bin.js" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/parcel/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/parcel/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/parcel/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/parcel/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/parcel/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/parcel/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + 
"version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-to-regexp": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.2.tgz", + "integrity": "sha512-GQX3SSMokngb36+whdpRXE+3f9V8UzyAorlYvOGx87ufGHehNTn5lCxrKtLyZ4Yl/wEKnNnr98ZzOwwDZV5ogw==", + "dev": true + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz", + "integrity": "sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==", + "dev": true, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + }, + "node_modules/posthtml": { + "version": "0.16.6", + "resolved": "https://registry.npmjs.org/posthtml/-/posthtml-0.16.6.tgz", + "integrity": "sha512-JcEmHlyLK/o0uGAlj65vgg+7LIms0xKXe60lcDOTU7oVX/3LuEuLwrQpW3VJ7de5TaFKiW4kWkaIpJL42FEgxQ==", + "dependencies": { 
+ "posthtml-parser": "^0.11.0", + "posthtml-render": "^3.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/posthtml-parser": { + "version": "0.10.2", + "resolved": "https://registry.npmjs.org/posthtml-parser/-/posthtml-parser-0.10.2.tgz", + "integrity": "sha512-PId6zZ/2lyJi9LiKfe+i2xv57oEjJgWbsHGGANwos5AvdQp98i6AtamAl8gzSVFGfQ43Glb5D614cvZf012VKg==", + "dependencies": { + "htmlparser2": "^7.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/posthtml-render": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/posthtml-render/-/posthtml-render-3.0.0.tgz", + "integrity": "sha512-z+16RoxK3fUPgwaIgH9NGnK1HKY9XIDpydky5eQGgAFVXTCSezalv9U2jQuNV+Z9qV1fDWNzldcw4eK0SSbqKA==", + "dependencies": { + "is-json": "^2.0.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/posthtml/node_modules/posthtml-parser": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/posthtml-parser/-/posthtml-parser-0.11.0.tgz", + "integrity": "sha512-QecJtfLekJbWVo/dMAA+OSwY79wpRmbqS5TeXvXSX+f0c6pW4/SE6inzZ2qkU7oAMCPqIDkZDvd/bQsSFUnKyw==", + "dependencies": { + "htmlparser2": "^7.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "2.8.8", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", + "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", + "dev": true, + "bin": { + "prettier": "bin-prettier.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/property-expr": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.6.tgz", + "integrity": "sha512-SVtmxhRE/CGkn3eZY1T6pC8Nln6Fr/lu1mKSgRud0eC73whjGfoAogbn78LkD8aFL0zz3bAFerKSnOl7NlErBA==" + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": 
"sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-color": { + "version": "2.19.3", + "resolved": "https://registry.npmjs.org/react-color/-/react-color-2.19.3.tgz", + "integrity": "sha512-LEeGE/ZzNLIsFWa1TMe8y5VYqr7bibneWmvJwm1pCn/eNmrabWDh659JSPn9BuaMpEfU83WTOJfnCcjDZwNQTA==", + "dependencies": { + "@icons/material": "^0.2.4", + "lodash": "^4.17.15", + "lodash-es": "^4.17.15", + "material-colors": "^1.2.1", + "prop-types": "^15.5.10", + "reactcss": "^1.2.0", + "tinycolor2": "^1.4.1" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-cookie": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/react-cookie/-/react-cookie-4.1.1.tgz", + "integrity": "sha512-ffn7Y7G4bXiFbnE+dKhHhbP+b8I34mH9jqnm8Llhj89zF4nPxPutxHT1suUqMeCEhLDBI7InYwf1tpaSoK5w8A==", + "dependencies": { + "@types/hoist-non-react-statics": "^3.0.1", + "hoist-non-react-statics": "^3.0.0", + "universal-cookie": "^4.0.0" + }, + "peerDependencies": { + "react": ">= 16.3.0" + } + }, + "node_modules/react-datepicker": { + "version": "4.25.0", + "resolved": "https://registry.npmjs.org/react-datepicker/-/react-datepicker-4.25.0.tgz", + "integrity": "sha512-zB7CSi44SJ0sqo8hUQ3BF1saE/knn7u25qEMTO1CQGofY1VAKahO8k9drZtp0cfW1DMfoYLR3uSY1/uMvbEzbg==", + "dependencies": { + "@popperjs/core": "^2.11.8", + "classnames": "^2.2.6", + "date-fns": "^2.30.0", + "prop-types": "^15.7.2", + "react-onclickoutside": "^6.13.0", + "react-popper": "^2.3.0" + }, + "peerDependencies": { + "react": "^16.9.0 || ^17 || ^18", + "react-dom": "^16.9.0 || ^17 || ^18" + } + }, + "node_modules/react-dnd": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/react-dnd/-/react-dnd-16.0.1.tgz", + "integrity": "sha512-QeoM/i73HHu2XF9aKksIUuamHPDvRglEwdHL4jsp784BgUuWcg6mzfxT0QDdQz8Wj0qyRKx2eMg8iZtWvU4E2Q==", + "dependencies": { + "@react-dnd/invariant": "^4.0.1", + "@react-dnd/shallowequal": "^4.0.1", + "dnd-core": "^16.0.1", + "fast-deep-equal": "^3.1.3", + "hoist-non-react-statics": "^3.3.2" + }, + "peerDependencies": { + "@types/hoist-non-react-statics": ">= 3.3.1", + "@types/node": ">= 12", + "@types/react": ">= 16", + "react": ">= 16.14" + }, + "peerDependenciesMeta": { + "@types/hoist-non-react-statics": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-dnd-html5-backend": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/react-dnd-html5-backend/-/react-dnd-html5-backend-16.0.1.tgz", + "integrity": "sha512-Wu3dw5aDJmOGw8WjH1I1/yTH+vlXEL4vmjk5p+MHxP8HuHJS1lAGeIdG/hze1AvNeXWo/JgULV87LyQOr+r5jw==", + "dependencies": { + "dnd-core": "^16.0.1" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": 
"sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-error-overlay": { + "version": "6.0.9", + "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.9.tgz", + "integrity": "sha512-nQTTcUu+ATDbrSD1BZHr5kgSD4oF8OFjxun8uAaL8RwPBacGBNPf/yAuVVdx17N8XNzRDMrZ9XcKZHCjPW+9ew==" + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" + }, + "node_modules/react-hook-form": { + "version": "7.52.1", + "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.52.1.tgz", + "integrity": "sha512-uNKIhaoICJ5KQALYZ4TOaOLElyM+xipord+Ha3crEFhTntdLvWZqVY49Wqd/0GiVCA/f9NjemLeiNPjG7Hpurg==", + "engines": { + "node": ">=12.22.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/react-hook-form" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17 || ^18 || ^19" + } + }, + "node_modules/react-i18next": { + "version": "12.3.1", + "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-12.3.1.tgz", + "integrity": "sha512-5v8E2XjZDFzK7K87eSwC7AJcAkcLt5xYZ4+yTPDAW1i7C93oOY1dnr4BaQM7un4Hm+GmghuiPvevWwlca5PwDA==", + "dependencies": { + "@babel/runtime": "^7.20.6", + "html-parse-stringify": "^3.0.1" + }, + "peerDependencies": { + "i18next": ">= 19.0.0", + "react": ">= 16.8.0" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + } + } + }, + "node_modules/react-icons": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/react-icons/-/react-icons-4.12.0.tgz", + "integrity": "sha512-IBaDuHiShdZqmfc/TwHu6+d6k2ltNCf3AszxNmjJc1KUfXdEeRJOKyNvLmAHaarhzGmTSVygNdyu8/opXv2gaw==", + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-idle-timer": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/react-idle-timer/-/react-idle-timer-5.7.2.tgz", + "integrity": "sha512-+BaPfc7XEUU5JFkwZCx6fO1bLVK+RBlFH+iY4X34urvIzZiZINP6v2orePx3E6pAztJGE7t4DzvL7if2SL/0GQ==", + "peerDependencies": { + "react": ">=16", + "react-dom": ">=16" + } + }, + "node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==" + }, + "node_modules/react-lifecycles-compat": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz", + "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==" + }, + "node_modules/react-modal": { + "version": "3.16.1", + "resolved": "https://registry.npmjs.org/react-modal/-/react-modal-3.16.1.tgz", + "integrity": "sha512-VStHgI3BVcGo7OXczvnJN7yT2TWHJPDXZWyI/a0ssFNhGZWsPmB8cF0z33ewDXq4VfYMO1vXgiv/g8Nj9NDyWg==", + "dependencies": { + "exenv": "^1.2.0", + "prop-types": "^15.7.2", + "react-lifecycles-compat": "^3.0.0", + "warning": "^4.0.3" + }, + "engines": { + "node": ">=8" + }, + "peerDependencies": { + "react": "^0.14.0 || ^15.0.0 || ^16 || ^17 || ^18", + "react-dom": "^0.14.0 || ^15.0.0 || ^16 || ^17 || ^18" + } + }, + 
"node_modules/react-onclickoutside": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/react-onclickoutside/-/react-onclickoutside-6.13.1.tgz", + "integrity": "sha512-LdrrxK/Yh9zbBQdFbMTXPp3dTSN9B+9YJQucdDu3JNKRrbdU+H+/TVONJoWtOwy4II8Sqf1y/DTI6w/vGPYW0w==", + "funding": { + "type": "individual", + "url": "https://github.com/Pomax/react-onclickoutside/blob/master/FUNDING.md" + }, + "peerDependencies": { + "react": "^15.5.x || ^16.x || ^17.x || ^18.x", + "react-dom": "^15.5.x || ^16.x || ^17.x || ^18.x" + } + }, + "node_modules/react-popper": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/react-popper/-/react-popper-2.3.0.tgz", + "integrity": "sha512-e1hj8lL3uM+sgSR4Lxzn5h1GxBlpa4CQz0XLF8kx4MDrDRWY0Ena4c97PUeSX9i5W3UAfDP0z0FXCTQkoXUl3Q==", + "dependencies": { + "react-fast-compare": "^3.0.1", + "warning": "^4.0.2" + }, + "peerDependencies": { + "@popperjs/core": "^2.0.0", + "react": "^16.8.0 || ^17 || ^18", + "react-dom": "^16.8.0 || ^17 || ^18" + } + }, + "node_modules/react-redux": { + "version": "8.1.3", + "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-8.1.3.tgz", + "integrity": "sha512-n0ZrutD7DaX/j9VscF+uTALI3oUPa/pO4Z3soOBIjuRn/FzVu6aehhysxZCLi6y7duMf52WNZGMl7CtuK5EnRw==", + "dependencies": { + "@babel/runtime": "^7.12.1", + "@types/hoist-non-react-statics": "^3.3.1", + "@types/use-sync-external-store": "^0.0.3", + "hoist-non-react-statics": "^3.3.2", + "react-is": "^18.0.0", + "use-sync-external-store": "^1.0.0" + }, + "peerDependencies": { + "@types/react": "^16.8 || ^17.0 || ^18.0", + "@types/react-dom": "^16.8 || ^17.0 || ^18.0", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0", + "react-native": ">=0.59", + "redux": "^4 || ^5.0.0-beta.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + }, + "redux": { + "optional": true + } + } + }, + "node_modules/react-redux/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" + }, + "node_modules/react-refresh": { + "version": "0.14.2", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.2.tgz", + "integrity": "sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.5.5", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz", + "integrity": "sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==", + "dependencies": { + "react-remove-scroll-bar": "^2.3.3", + "react-style-singleton": "^2.2.1", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.0", + "use-sidecar": "^1.1.2" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.6.tgz", + "integrity": 
"sha512-DtSYaao4mBmX+HDo5YWYdBWQwYIQQshUV/dVxFxK+KM26Wjwp1gZ6rv6OC3oujI6Bfu6Xyg3TwK533AQutsn/g==", + "dependencies": { + "react-style-singleton": "^2.2.1", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-router": { + "version": "6.23.1", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.23.1.tgz", + "integrity": "sha512-fzcOaRF69uvqbbM7OhvQyBTFDVrrGlsFdS3AL+1KfIBtGETibHzi3FkoTRyiDJnWNc2VxrfvR+657ROHjaNjqQ==", + "dependencies": { + "@remix-run/router": "1.16.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8" + } + }, + "node_modules/react-router-dom": { + "version": "6.23.1", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.23.1.tgz", + "integrity": "sha512-utP+K+aSTtEdbWpC+4gxhdlPFwuEfDKq8ZrPFU65bbRJY+l706qjR7yaidBpo3MSeA/fzwbXWbKBI6ftOnP3OQ==", + "dependencies": { + "@remix-run/router": "1.16.1", + "react-router": "6.23.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/react-select": { + "version": "5.8.0", + "resolved": "https://registry.npmjs.org/react-select/-/react-select-5.8.0.tgz", + "integrity": "sha512-TfjLDo58XrhP6VG5M/Mi56Us0Yt8X7xD6cDybC7yoRMUNm7BGO7qk8J0TLQOua/prb8vUOtsfnXZwfm30HGsAA==", + "dependencies": { + "@babel/runtime": "^7.12.0", + "@emotion/cache": "^11.4.0", + "@emotion/react": "^11.8.1", + "@floating-ui/dom": "^1.0.1", + "@types/react-transition-group": "^4.4.0", + "memoize-one": "^6.0.0", + "prop-types": "^15.6.0", + "react-transition-group": "^4.3.0", + "use-isomorphic-layout-effect": "^1.1.2" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-style-singleton": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", + "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", + "dependencies": { + "get-nonce": "^1.0.0", + "invariant": "^2.2.4", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-text-selection-popover": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/react-text-selection-popover/-/react-text-selection-popover-2.0.2.tgz", + "integrity": "sha512-VbQnJMHX6GrMRS5QGQnb8YuFL45JRcosraTJjdmjib4Xt9MOcTHXmuIyI12xbG2QZv2Tsa+aOZvYgTlo8I00dA==", + "dependencies": { + "use-text-selection": "^1.1.3" + }, + "peerDependencies": { + "react": "^16.8.0,^17.x,^18.x", + "react-dom": "^16.8.0,^17.x,^18.x" + } + }, + "node_modules/react-textarea-autosize": { + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", + "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", + "dependencies": { + "@babel/runtime": "^7.20.13", + "use-composed-ref": "^1.3.0", + "use-latest": "^1.2.1" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": "^16.8.0 || 
^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-transition-group": { + "version": "4.4.5", + "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz", + "integrity": "sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==", + "dependencies": { + "@babel/runtime": "^7.5.5", + "dom-helpers": "^5.0.1", + "loose-envify": "^1.4.0", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": ">=16.6.0", + "react-dom": ">=16.6.0" + } + }, + "node_modules/reactcss": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/reactcss/-/reactcss-1.2.3.tgz", + "integrity": "sha512-KiwVUcFu1RErkI97ywr8nvx8dNOpT03rbnma0SSalTYjkrPYaEajR4a/MRt6DZ46K6arDRbWMNHF+xH7G7n/8A==", + "dependencies": { + "lodash": "^4.0.1" + } + }, + "node_modules/reactflow": { + "version": "11.11.3", + "resolved": "https://registry.npmjs.org/reactflow/-/reactflow-11.11.3.tgz", + "integrity": "sha512-wusd1Xpn1wgsSEv7UIa4NNraCwH9syBtubBy4xVNXg3b+CDKM+sFaF3hnMx0tr0et4km9urIDdNvwm34QiZong==", + "dependencies": { + "@reactflow/background": "11.3.13", + "@reactflow/controls": "11.2.13", + "@reactflow/core": "11.11.3", + "@reactflow/minimap": "11.7.13", + "@reactflow/node-resizer": "2.2.13", + "@reactflow/node-toolbar": "1.3.13" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/redux": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/redux/-/redux-4.2.1.tgz", + "integrity": "sha512-LAUYz4lc+Do8/g7aeRa8JkyDErK6ekstQaqWQrNRW//MY1TvCEpMtpTWvlQ+FPbWCx+Xixu/6SHt5N0HR+SB4w==", + "dependencies": { + "@babel/runtime": "^7.9.2" + } + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", + "integrity": "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.7", + "get-proto": "^1.0.1", + "which-builtin-type": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", + "dev": true + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": 
"https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dev": true, + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, + "node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dev": true, + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexify-string": { + "version": "1.0.19", + "resolved": "https://registry.npmjs.org/regexify-string/-/regexify-string-1.0.19.tgz", + "integrity": "sha512-EREOggl31J6v2Hk3ksPuOof0DMq5QhFfVQ7iDaGQ6BeA1QcrV4rhGvwCES5a72ITMmLBDAOb6cOWbn8/Ja82Ig==" + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz", + "integrity": "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dev": true, + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dev": true, + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/remove-accents": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/remove-accents/-/remove-accents-0.5.0.tgz", + "integrity": "sha512-8g3/Otx1eJaVD12e31UbJj1YzdtVvzH85HV7t+9MJYk/u3XmkOUJ5Ys9wQrf9PCPK8+xn4ymzqYCiZl6QWKn+A==" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": 
"sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requireindex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/requireindex/-/requireindex-1.1.0.tgz", + "integrity": "sha512-LBnkqsDE7BZKvqylbmn7lTIVdpx4K/QCduRATpO5R+wtPmky/a8pN1bO2D6wXppn1497AJF9mNjqAXr6bdl9jg==", + "dev": true, + "engines": { + "node": ">=0.10.5" + } + }, + "node_modules/resolve": { + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "3.29.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz", + "integrity": "sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==", + "dev": true, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=14.18.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/run-async": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", + "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": 
"https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-array-concat": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", + "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "has-symbols": "^1.1.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safe-push-apply": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz", + "integrity": "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "node_modules/sass": { + "version": "1.77.5", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.5.tgz", + "integrity": "sha512-oDfX1mukIlxacPdQqNb6mV2tVCrnE+P3nVYioy72V5tlk56CPNcO4TCuFcaCRKKfJ1M3lH95CleRS+dVKL2qMg==", + "dev": true, + "dependencies": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "bin": { + "sass": "sass.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/set-cookie-parser": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz", + "integrity": "sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==", + "dev": true + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-proto": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz", + "integrity": "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": 
"^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/srcset": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", + "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stable": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", + "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", + "deprecated": "Modern JS already guarantees Array#sort() is a stable sort, so this library is deprecated. 
See the compatibility table on MDN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#browser_compatibility" + }, + "node_modules/strict-event-emitter": { + "version": "0.4.6", + "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.4.6.tgz", + "integrity": "sha512-12KWeb+wixJohmnwNFerbyiBrAlq5qJLwIt38etRtKtmmHyDSoGlIqFE9wx+4IwG0aDjI7GV8tc8ZccjWZZtTg==", + "dev": true + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-natural-compare": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/string-natural-compare/-/string-natural-compare-3.0.1.tgz", + "integrity": "sha512-n3sPwynL1nwKi3WJ6AIsClwBMa0zTi54fn2oLU6ndfTSIO05xaznjSf15PcBZU6FNWbmN5Q6cxT4V5hGvB4taw==", + "dev": true + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/string.prototype.includes": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz", + "integrity": "sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.12", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", + "integrity": "sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.6", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "regexp.prototype.flags": "^1.5.3", + "set-function-name": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.repeat": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz", + "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + } + }, + "node_modules/string.prototype.trim": { + 
"version": "1.2.10", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", + "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-data-property": "^1.1.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-object-atoms": "^1.0.0", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz", + "integrity": "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==" + }, + "node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + 
"integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==", + "dev": true + }, + "node_modules/svgo": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", + "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^4.1.3", + "css-tree": "^1.1.3", + "csso": "^4.2.0", + "picocolors": "^1.0.0", + "stable": "^0.1.8" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/term-size": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.1.tgz", + "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", + "dev": true + }, + "node_modules/timeago.js": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/timeago.js/-/timeago.js-4.0.2.tgz", + "integrity": "sha512-a7wPxPdVlQL7lqvitHGGRsofhdwtkoSXPGATFuSOA2i1ZNQEPLrGnj68vOp2sOJTCFAQVXPeNMX/GctBaO9L2w==" + }, + "node_modules/timsort": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", + "integrity": "sha512-qsdtZH+vMoCARQtyod4imc2nIJwg9Cc7lPRrw9CzF8ZKR0khdr8+2nX80PBhET3tcyTtJDxAffGh2rXH4tyU8A==" + }, + "node_modules/tiny-case": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-case/-/tiny-case-1.0.3.tgz", + "integrity": "sha512-Eet/eeMhkO6TX8mnUteS9zgPbUMQa4I6Kkp5ORiBD5476/m+PIRiumP5tmh5ioJpH7k51Kehawy2UDfsnxxY8Q==" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, + "node_modules/tinycolor2": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/tinycolor2/-/tinycolor2-1.6.0.tgz", + "integrity": "sha512-XPaBkWQJdsf3pLKJV9p4qN/S+fm2Oj8AIPo1BTUhg5oxkvm9+SVEGFdhyOz7tTdUTfvxMiAs4sp6/eZO2Ew+pw==" + }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dev": true, + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toposort": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/toposort/-/toposort-2.0.2.tgz", + "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==" + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "dev": true + }, + "node_modules/ts-api-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/tsconfck": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/tsconfck/-/tsconfck-3.1.0.tgz", + "integrity": "sha512-CMjc5zMnyAjcS9sPLytrbFmj89st2g+JYtY/c02ug4Q+CZaAtCgbyviI0n1YvjZE/pzoc6FbNsINS13DOL1B9w==", + "dev": true, + "bin": { + "tsconfck": "bin/tsconfck.js" + }, + "engines": { + "node": "^18 || >=20" + }, + "peerDependencies": { + "typescript": "^5.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/tsconfig-paths": { + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", + "integrity": "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==", + "dev": true, + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "node_modules/tsconfig-paths/node_modules/json5": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", + "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", + "dev": true, + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, + "node_modules/tslib": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" + }, + "node_modules/tsutils": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz", + "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", + "dev": true, + "dependencies": { + "tslib": "^1.8.1" + }, + "engines": { + "node": ">= 6" + }, + "peerDependencies": { + "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" + } + }, + "node_modules/tsutils/node_modules/tslib": { + "version": "1.14.1", + "resolved": 
"https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + "dev": true + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", + "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz", + "integrity": "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz", + "integrity": "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.15", + "reflect.getprototypeof": "^1.0.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz", + "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0", + "reflect.getprototypeof": "^1.0.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typescript": { + "version": "4.9.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", + "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": 
">=4.2.0" + } + }, + "node_modules/unbox-primitive": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz", + "integrity": "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-bigints": "^1.0.2", + "has-symbols": "^1.1.0", + "which-boxed-primitive": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dev": true, + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/universal-cookie": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/universal-cookie/-/universal-cookie-4.0.4.tgz", + "integrity": "sha512-lbRVHoOMtItjWbM7TwDLdl8wug7izB0tq3/YVKhT/ahB4VDvWMyvnADfnJI8y6fSvsjh51Ix7lTGC6Tn4rMPhw==", + "dependencies": { + "@types/cookie": "^0.3.3", + "cookie": "^0.4.0" + } + }, + "node_modules/universal-cookie/node_modules/@types/cookie": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.3.3.tgz", + "integrity": "sha512-LKVP3cgXBT9RYj+t+9FDKwS5tdI+rPBXaNSkma7hvqy35lc7mAokC2zsqWJH0LaqIt3B962nuYI77hsJoT1gow==" + }, + "node_modules/update-browserslist-db": { + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.16.tgz", + "integrity": "sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": 
"https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.2", + "picocolors": "^1.0.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.2.tgz", + "integrity": "sha512-elOQwe6Q8gqZgDA8mrh44qRTQqpIHDcZ3hXTLjBe1i4ph8XpNJnO+aQf3NaG+lriLopI4HMx9VjQLfPQ6vhnoA==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-composed-ref": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", + "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/use-isomorphic-layout-effect": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", + "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-latest": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", + "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", + "dependencies": { + "use-isomorphic-layout-effect": "^1.1.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", + "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sync-external-store": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz", + "integrity": "sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/use-text-selection": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/use-text-selection/-/use-text-selection-1.1.5.tgz", + "integrity": 
"sha512-JOuQYG0vKHRj0dfax0dy/HxyF31MN0Q2UP1rl1LtFA0qnQ0Uw4XGh4BucHA9g8kxlnVFv+JTlJQ4B+TwXCGxOg==", + "dependencies": { + "parcel": "^2.0.0-beta.2" + }, + "peerDependencies": { + "react": "^17.0.1" + } + }, + "node_modules/usehooks-ts": { + "version": "2.16.0", + "resolved": "https://registry.npmjs.org/usehooks-ts/-/usehooks-ts-2.16.0.tgz", + "integrity": "sha512-bez95WqYujxp6hFdM/CpRDiVPirZPxlMzOH2QB8yopoKQMXpscyZoxOjpEdaxvV+CAWUDSM62cWnqHE0E/MZ7w==", + "dependencies": { + "lodash.debounce": "^4.0.8" + }, + "engines": { + "node": ">=16.15.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17 || ^18" + } + }, + "node_modules/util": { + "version": "0.12.5", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "dev": true, + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true + }, + "node_modules/utility-types": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz", + "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/vite": { + "version": "4.5.3", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.3.tgz", + "integrity": "sha512-kQL23kMeX92v3ph7IauVkXkikdDRsYMGTVl5KY2E9OY4ONLvkHf04MDTbnfo6NKxZiDLWzVpP5oTa8hQD8U3dg==", + "dev": true, + "dependencies": { + "esbuild": "^0.18.10", + "postcss": "^8.4.27", + "rollup": "^3.27.1" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "@types/node": ">= 14", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-plugin-env-compatible": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/vite-plugin-env-compatible/-/vite-plugin-env-compatible-1.1.1.tgz", + "integrity": "sha512-4lqhBWhOzP+SaCPoCVdmpM5cXzjKQV5jgFauxea488oOeElXo/kw6bXkMIooZhrh9q7gclTl8en6N9NmnqUwRQ==", + "dev": true + }, + "node_modules/vite-plugin-svgr": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/vite-plugin-svgr/-/vite-plugin-svgr-2.4.0.tgz", + "integrity": 
"sha512-q+mJJol6ThvqkkJvvVFEndI4EaKIjSI0I3jNFgSoC9fXAz1M7kYTVUin8fhUsFojFDKZ9VHKtX6NXNaOLpbsHA==", + "dev": true, + "dependencies": { + "@rollup/pluginutils": "^5.0.2", + "@svgr/core": "^6.5.1" + }, + "peerDependencies": { + "vite": "^2.6.0 || 3 || 4" + } + }, + "node_modules/vite-plugin-transform": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/vite-plugin-transform/-/vite-plugin-transform-2.0.1.tgz", + "integrity": "sha512-sI9SzcuFbCj04YHEmhw9C14kNnVq3QFLWq7eofjNnDWnw/p+i+6pnSvVZSx1GDVpW1ciZglrv794XEU/lGGvyA==", + "dev": true + }, + "node_modules/vite-tsconfig-paths": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/vite-tsconfig-paths/-/vite-tsconfig-paths-4.3.2.tgz", + "integrity": "sha512-0Vd/a6po6Q+86rPlntHye7F31zA2URZMbH8M3saAZ/xR9QoGN/L21bxEGfXdWmFdNkqPpRdxFT7nmNe12e9/uA==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "globrex": "^0.1.2", + "tsconfck": "^3.0.3" + }, + "peerDependencies": { + "vite": "*" + }, + "peerDependenciesMeta": { + "vite": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.18.20.tgz", + "integrity": "sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz", + "integrity": "sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.18.20.tgz", + "integrity": "sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz", + "integrity": "sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz", + "integrity": "sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz", + "integrity": "sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + 
}, + "node_modules/vite/node_modules/@esbuild/freebsd-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz", + "integrity": "sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz", + "integrity": "sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz", + "integrity": "sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ia32": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz", + "integrity": "sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-loong64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz", + "integrity": "sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==", + "cpu": [ + "loong64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-mips64el": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz", + "integrity": "sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==", + "cpu": [ + "mips64el" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ppc64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz", + "integrity": "sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-riscv64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz", + "integrity": "sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-s390x": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz", + "integrity": 
"sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz", + "integrity": "sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/netbsd-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz", + "integrity": "sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/openbsd-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz", + "integrity": "sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/sunos-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz", + "integrity": "sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-arm64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz", + "integrity": "sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-ia32": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz", + "integrity": "sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-x64": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz", + "integrity": "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/esbuild": { + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.18.20.tgz", + "integrity": "sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==", + "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.18.20", + 
"@esbuild/android-arm64": "0.18.20", + "@esbuild/android-x64": "0.18.20", + "@esbuild/darwin-arm64": "0.18.20", + "@esbuild/darwin-x64": "0.18.20", + "@esbuild/freebsd-arm64": "0.18.20", + "@esbuild/freebsd-x64": "0.18.20", + "@esbuild/linux-arm": "0.18.20", + "@esbuild/linux-arm64": "0.18.20", + "@esbuild/linux-ia32": "0.18.20", + "@esbuild/linux-loong64": "0.18.20", + "@esbuild/linux-mips64el": "0.18.20", + "@esbuild/linux-ppc64": "0.18.20", + "@esbuild/linux-riscv64": "0.18.20", + "@esbuild/linux-s390x": "0.18.20", + "@esbuild/linux-x64": "0.18.20", + "@esbuild/netbsd-x64": "0.18.20", + "@esbuild/openbsd-x64": "0.18.20", + "@esbuild/sunos-x64": "0.18.20", + "@esbuild/win32-arm64": "0.18.20", + "@esbuild/win32-ia32": "0.18.20", + "@esbuild/win32-x64": "0.18.20" + } + }, + "node_modules/void-elements": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", + "integrity": "sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dev": true, + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/weak-lru-cache": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/weak-lru-cache/-/weak-lru-cache-1.2.2.tgz", + "integrity": "sha512-DEAoo25RfSYMuTGc9vPJzZcZullwIqRDSI9LOy+fkCJPi6hykCnfKaXTuPBDuXAUcqHXyOgFtHNp/kB2FjYHbw==" + }, + "node_modules/web-encoding": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/web-encoding/-/web-encoding-1.1.5.tgz", + "integrity": "sha512-HYLeVCdJ0+lBYV2FvNZmv3HJ2Nt0QYXqZojk3d9FJOLkwnuhzM9tmamh8d7HPM8QqjKH8DeHkFTx+CFlWpZZDA==", + "dev": true, + "dependencies": { + "util": "^0.12.3" + }, + "optionalDependencies": { + "@zxing/text-encoding": "0.9.0" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "dev": true + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dev": true, + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz", + "integrity": 
"sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-bigint": "^1.1.0", + "is-boolean-object": "^1.2.1", + "is-number-object": "^1.1.1", + "is-string": "^1.1.1", + "is-symbol": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz", + "integrity": "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", + "is-async-function": "^2.0.0", + "is-date-object": "^1.1.0", + "is-finalizationregistry": "^1.1.0", + "is-generator-function": "^1.0.10", + "is-regex": "^1.2.1", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.1.0", + "which-collection": "^1.0.2", + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.19", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": 
"https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yup": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/yup/-/yup-1.4.0.tgz", + "integrity": "sha512-wPbgkJRCqIf+OHyiTBQoJiP5PFuAXaWiJK6AmYkzQAh5/c2K9hzSApBZG5wV9KoKSePF7sAxmNSvh/13YHkFDg==", + "dependencies": { + "property-expr": "^2.0.5", + "tiny-case": "^1.0.3", + "toposort": "^2.0.2", + "type-fest": "^2.19.0" + } + }, + "node_modules/zustand": { + "version": "4.5.2", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.2.tgz", + "integrity": 
"sha512-2cN1tPkDVkwCy5ickKrI7vijSjPksFRfqS6237NzT0vqSsztTNnQdHw9mmN7uBdk3gceVXU0a+21jFzFzAc9+g==", + "dependencies": { + "use-sync-external-store": "1.2.0" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + }, + "node_modules/zustand/node_modules/use-sync-external-store": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", + "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + } + } +} diff --git a/GUI/package.json b/GUI/package.json new file mode 100644 index 00000000..09ab4a81 --- /dev/null +++ b/GUI/package.json @@ -0,0 +1,117 @@ +{ + "name": "byk-training-module-gui", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite --port 3001 --host", + "build": "tsc && vite build", + "preview": "vite preview", + "lint": "tsc --noEmit && eslint \"./src/**/*.{js,ts,tsx}\"", + "prettier": "prettier --write \"{,!(node_modules)/**/}*.{ts,tsx,js,json,css,less,scss}\"" + }, + "dependencies": { + "@buerokratt-ria/styles": "^0.0.1", + "@fontsource/roboto": "^4.5.8", + "@formkit/auto-animate": "^1.0.0-beta.5", + "@fortaine/fetch-event-source": "^3.0.6", + "@radix-ui/react-accessible-icon": "^1.0.1", + "@radix-ui/react-collapsible": "^1.0.1", + "@radix-ui/react-dialog": "^1.0.2", + "@radix-ui/react-popover": "^1.0.2", + "@radix-ui/react-progress": "^1.1.0", + "@radix-ui/react-select": "^1.1.2", + "@radix-ui/react-switch": "^1.0.1", + "@radix-ui/react-tabs": "^1.0.1", + "@radix-ui/react-toast": "^1.1.2", + "@radix-ui/react-tooltip": "^1.0.2", + "@tanstack/match-sorter-utils": "^8.7.2", + "@tanstack/react-query": "^4.36.1", + "@tanstack/react-table": "^8.7.4", + "axios": "^1.2.1", + "clsx": "^1.2.1", + "date-fns": "^2.29.3", + "downshift": "^7.0.5", + "esbuild": "^0.19.5", + "formik": "^2.4.6", + "framer-motion": "^8.5.5", + "howler": "^2.2.4", + "i18next": "^22.4.5", + "i18next-browser-languagedetector": "^7.0.1", + "linkify-react": "^4.1.1", + "linkifyjs": "^4.1.1", + "lodash": "^4.17.21", + "moment": "^2.30.1", + "react": "^18.2.0", + "react-color": "^2.19.3", + "react-cookie": "^4.1.1", + "react-datepicker": "^4.8.0", + "react-dnd": "^16.0.1", + "react-dnd-html5-backend": "^16.0.1", + "react-dom": "^18.2.0", + "react-hook-form": "^7.52.1", + "react-i18next": "^12.1.1", + "react-icons": "^4.10.1", + "react-idle-timer": "^5.5.2", + "react-modal": "^3.16.1", + "react-redux": "^8.1.1", + "react-router-dom": "^6.5.0", + "react-select": "^5.7.4", + "react-text-selection-popover": "^2.0.2", + "react-textarea-autosize": "^8.4.0", + "reactflow": "^11.4.0", + "regexify-string": "^1.0.19", + "rxjs": "^7.8.1", + "timeago.js": "^4.0.2", + "usehooks-ts": "^2.9.1", + "uuid": "^9.0.0", + "yup": "^1.4.0", + "zustand": "^4.4.4" + }, + "devDependencies": { + "@types/howler": "^2.2.11", + "@types/lodash": "^4.14.191", + "@types/lodash.debounce": "^4.0.7", + "@types/node": "^18.11.17", + "@types/react": "^18.0.26", + "@types/react-color": "^3.0.6", + "@types/react-datepicker": "^4.8.0", + "@types/react-dom": "^18.0.9", + "@types/uuid": "^9.0.2", + "@typescript-eslint/eslint-plugin": "^8.32.1", + "@typescript-eslint/parser": "^8.32.1", + 
"@vitejs/plugin-react": "^3.0.0", + "eslint": "^8.57.1", + "eslint-config-react-app": "^7.0.1", + "eslint-plugin-import": "^2.31.0", + "eslint-plugin-jsx-a11y": "^6.10.2", + "eslint-plugin-react": "^7.37.5", + "eslint-plugin-react-hooks": "^5.2.0", + "eslint-plugin-typescript": "^0.14.0", + "mocksse": "^1.0.4", + "msw": "^0.49.2", + "prettier": "^2.8.1", + "sass": "^1.57.0", + "typescript": "^4.9.3", + "vite": "^4.0.0", + "vite-plugin-env-compatible": "^1.1.1", + "vite-plugin-svgr": "^2.4.0", + "vite-plugin-transform": "^2.0.1", + "vite-tsconfig-paths": "^4.0.3" + }, + "browserslist": { + "production": [ + ">0.2%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] + }, + "msw": { + "workerDirectory": "public" + } +} diff --git a/GUI/public/favicon.ico b/GUI/public/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..b9d127c23fc0da8cd48683ec6dc4ebef733d30ca GIT binary patch literal 15406 zcmeI24Ukq<8OIMwWd$V$rew02*$N4(fXeQDmsNC;VJ*MbkC0>_$uXnRW^AlxVa#;K z87p+OFro!EDL^rb(Xv`)4bYlGw3G-C#Z(Z~LR>`l_WPf6PhNNL``){+y9zV+o%zpm z-gBO>^PF>@bMCpr|%!MJn|thKL&sIe4fJhAoce{$49&xiw1Po3i7K1?T-ma&1{A6GDj5&z^C&1f)>^Y#u#_tsK5$f)M=Ta{h zr*{4n%=Om3Xkfl%?FZ%>$_v0^Vp1KKNh;gTU)oBbkZ^ufm2;6AVy`}@EK@LT>bhF;iP_Q_F0!Jn|x zft?lLBFdaBdjE%=bFudtRQ5;ozQwmJ(C6{k+zV!hjU2L~UpU-+*zO~N`6g}J*O!8O zDgPY&5X_*x*6)j6t~(SR1Hd+XX!dgaud}uS^E%}aJO^6g(Y}2bSPoY6|0Sr}cY|g4 zGEwu#>&Zfq)7;*!eF=ID_#KeWNATea_N-5q(!-u!0pB$E9{?Tr^r+4YrDWo5$gD&D z2Ver_SD{Zr&xrG6En{1Itf90Q@ zKzr?Hp{+o^KLW1AmroV5Y3F&JsYLE7cvnVpZvMHoxvQA0uaBM=wjsMj`czMh$DvcX zR(Un#|90qV@N4R|{*@!wg82Gd|k3b%QJOcZ91avOv%ylGZtxC>^Rh+v9@6RsVv$1>k@eR(8_kbtB zMz95J#s7!6^U3^9?cM0Bo_#?Dan^gXwfMLXs`Is;(O(5Gf~`P(cmUpR-&yolKk#`t zKHUPI2J%_YQhJ7+12hJW%$r(#uHw0@lK=0@y=q#2b$5j?dVYEqsyk3&zV%h}op*lB zrEc9RrrGSNcHRkpj5%^E{_X+0DC>Riu%6Q2n|kn9_Z{xuN9$70^1N3qX49@`<~wWl zGsupx^QhLo+dKx@LGe4tVmf-N9@~dtb0PlkgRWz)jFx|$_2cK!w0{}sx%F@GZ-8oj zXbo?r{#mdYs9x*+ZggEs`E7R^o%Ozx|2B?+nTieF-8_fAdao}WX5LTVj-vjH`0@}` zYe9Fj%0qhA*FDg`f!4|v>%Y63SH13F9lu-u&(nS@s0mMIp7zihQqIOs8&Hn%E!95 zmb$cW3vp1noL*A+|33? z^ZH-l{-~_=cvjZ@hrjxkMr&I+=o!kJq3fZK(ywN4mF-vTJ)rl)z9Z9JxtmWK>mz+z zR*J0r*SZpRFb)@YCl2r*j@<8oJI58w^Hz_`PyW25JL`Yam+#Rwl0Mt-ZseohZ&L5> zSh-)#>bK`ZpJMO)ui@Jma;9&~N||Se;e*!ah;S_5`sm$X!AyZRQ8yp_1!$hgj`Gkl z+U$Ch4S1EW+M%nm+Vp(LNx$?yuKQJA-xHK^>=V}W&RFZiy$jKKP-j-zQyyJzbK0q_ z8MmI>yK5IY=fC#d67MRztG5`xT`zjqsWt2NLG78B68p{6mE-?LTzgGhv3?h;M<%|0weL4(vhU`(Vz$_xTb0+{*>wx_ zvSRk)_4XX$&WtX8?8))=EM@Dz8@tx1FI$LBecCwSquwsyC;!1*7T=~?9BJjiPPyuyA`9qE!!+wko=&DA(>yliu~a=X5f&^b?eP|ssJBha5X zZ&DfF&(Y5o__beZE$_i*Ir+z(A9MzFdC+|;;9{$HUjP5@Gs%zc*5Rj~Gp>W`%&NJo zXa7miP0&4M==0~dwaP!lt}nW#qf6&{e}2>c(ny;-8zk(dNp@}ibUKe9HYcECCeS{o z`SMo&l-?Sfe_l`0Q>w1h`T5)OU;Nn9Gx-hRACbK`v9W>j5?in58224%4Rh~A$?*P&@+!*f!6yFenbE=JgQuuh%;$nDX&V~|_?ipNUL1>e?P z8tH05k7E1+Hm^<7XLFEpC*L$!XdZLsrf>bz?4y4;b{4^_eD^f;Oh4{vvc=ko`9Spe z`;&IcE3q@K7=JnS)`r%xyI)fN(S9=~X?)7k@9wX@ir!~{o{w~{_yafxJ40;DXLY@s zNU~+~kIrwp_tM$4V9(E5CtAmPzP~3NUHW^tB)wVci2X41-vl(rWlyovyw>+#*F&e$ zpNY(yYT)vZ&i2Yb?ibkL_K;hkmsdPeX1U97*@KN?I0$}76dEtP*Bru|MZ9UP}TT} { + return client.id !== clientId + }) + + // Unregister itself when there are no more clients + if (remainingClients.length === 0) { + self.registration.unregister() + } + + break + } + } +}) + +self.addEventListener('fetch', function (event) { + const { request } = event + const accept = request.headers.get('accept') || '' + + // Bypass server-sent events. 
+  if (accept.includes('text/event-stream')) {
+    return
+  }
+
+  // Bypass navigation requests.
+  if (request.mode === 'navigate') {
+    return
+  }
+
+  // Opening the DevTools triggers the "only-if-cached" request
+  // that cannot be handled by the worker. Bypass such requests.
+  if (request.cache === 'only-if-cached' && request.mode !== 'same-origin') {
+    return
+  }
+
+  // Bypass all requests when there are no active clients.
+  // Prevents the self-unregistered worker from handling requests
+  // after it's been deleted (still remains active until the next reload).
+  if (activeClientIds.size === 0) {
+    return
+  }
+
+  // Generate a unique request ID.
+  const requestId = Math.random().toString(16).slice(2)
+
+  event.respondWith(
+    handleRequest(event, requestId).catch((error) => {
+      if (error.name === 'NetworkError') {
+        console.warn(
+          '[MSW] Successfully emulated a network error for the "%s %s" request.',
+          request.method,
+          request.url,
+        )
+        return
+      }
+
+      // At this point, any exception indicates an issue with the original request/response.
+      console.error(
+        `\
+[MSW] Caught an exception from the "%s %s" request (%s). This is probably not a problem with Mock Service Worker. There is likely an additional logging output above.`,
+        request.method,
+        request.url,
+        `${error.name}: ${error.message}`,
+      )
+    }),
+  )
+})
+
+async function handleRequest(event, requestId) {
+  const client = await resolveMainClient(event)
+  const response = await getResponse(event, client, requestId)
+
+  // Send back the response clone for the "response:*" life-cycle events.
+  // Ensure MSW is active and ready to handle the message, otherwise
+  // this message will remain pending indefinitely.
+  if (client && activeClientIds.has(client.id)) {
+    ;(async function () {
+      const clonedResponse = response.clone()
+      sendToClient(client, {
+        type: 'RESPONSE',
+        payload: {
+          requestId,
+          type: clonedResponse.type,
+          ok: clonedResponse.ok,
+          status: clonedResponse.status,
+          statusText: clonedResponse.statusText,
+          body:
+            clonedResponse.body === null ? null : await clonedResponse.text(),
+          headers: Object.fromEntries(clonedResponse.headers.entries()),
+          redirected: clonedResponse.redirected,
+        },
+      })
+    })()
+  }
+
+  return response
+}
+
+// Resolve the main client for the given event.
+// The client that issues a request is not necessarily the client
+// that registered the worker. It is the latter that the worker should
+// communicate with during the response resolving phase.
+async function resolveMainClient(event) {
+  const client = await self.clients.get(event.clientId)
+
+  if (client?.frameType === 'top-level') {
+    return client
+  }
+
+  const allClients = await self.clients.matchAll({
+    type: 'window',
+  })
+
+  return allClients
+    .filter((client) => {
+      // Get only those clients that are currently visible.
+      return client.visibilityState === 'visible'
+    })
+    .find((client) => {
+      // Find the client ID that's recorded in the
+      // set of clients that have registered the worker.
+      return activeClientIds.has(client.id)
+    })
+}
+
+async function getResponse(event, client, requestId) {
+  const { request } = event
+  const clonedRequest = request.clone()
+
+  function passthrough() {
+    // Clone the request because it might've been already used
+    // (i.e. its body has been read and sent to the client).
+    const headers = Object.fromEntries(clonedRequest.headers.entries())
+
+    // Remove MSW-specific request headers so the bypassed requests
+    // comply with the server's CORS preflight check.
+ // Operate with the headers as an object because request "Headers" + // are immutable. + delete headers['x-msw-bypass'] + + return fetch(clonedRequest, { headers }) + } + + // Bypass mocking when the client is not active. + if (!client) { + return passthrough() + } + + // Bypass initial page load requests (i.e. static assets). + // The absence of the immediate/parent client in the map of the active clients + // means that MSW hasn't dispatched the "MOCK_ACTIVATE" event yet + // and is not ready to handle requests. + if (!activeClientIds.has(client.id)) { + return passthrough() + } + + // Bypass requests with the explicit bypass header. + // Such requests can be issued by "ctx.fetch()". + if (request.headers.get('x-msw-bypass') === 'true') { + return passthrough() + } + + // Notify the client that a request has been intercepted. + const clientMessage = await sendToClient(client, { + type: 'REQUEST', + payload: { + id: requestId, + url: request.url, + method: request.method, + headers: Object.fromEntries(request.headers.entries()), + cache: request.cache, + mode: request.mode, + credentials: request.credentials, + destination: request.destination, + integrity: request.integrity, + redirect: request.redirect, + referrer: request.referrer, + referrerPolicy: request.referrerPolicy, + body: await request.text(), + bodyUsed: request.bodyUsed, + keepalive: request.keepalive, + }, + }) + + switch (clientMessage.type) { + case 'MOCK_RESPONSE': { + return respondWithMock(clientMessage.data) + } + + case 'MOCK_NOT_FOUND': { + return passthrough() + } + + case 'NETWORK_ERROR': { + const { name, message } = clientMessage.data + const networkError = new Error(message) + networkError.name = name + + // Rejecting a "respondWith" promise emulates a network error. + throw networkError + } + } + + return passthrough() +} + +function sendToClient(client, message) { + return new Promise((resolve, reject) => { + const channel = new MessageChannel() + + channel.port1.onmessage = (event) => { + if (event.data && event.data.error) { + return reject(event.data.error) + } + + resolve(event.data) + } + + client.postMessage(message, [channel.port2]) + }) +} + +function sleep(timeMs) { + return new Promise((resolve) => { + setTimeout(resolve, timeMs) + }) +} + +async function respondWithMock(response) { + await sleep(response.delay) + return new Response(response.body, response) +} diff --git a/GUI/rebuild.sh b/GUI/rebuild.sh new file mode 100644 index 00000000..c83c0b8b --- /dev/null +++ b/GUI/rebuild.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +# Install dependencies +apk add nodejs + +# Rebuild the project +cd /opt/buerokratt-classifier +./node_modules/.bin/vite build -l warn +cp -ru build/* /usr/share/nginx/html/buerokratt-classifier + +# Start the Nginx server +nginx -g "daemon off;" diff --git a/GUI/src/App.tsx b/GUI/src/App.tsx new file mode 100644 index 00000000..7b3e8e7d --- /dev/null +++ b/GUI/src/App.tsx @@ -0,0 +1,67 @@ +import { FC, useEffect, useState } from 'react'; +import { Route, Routes, useNavigate, useLocation } from 'react-router-dom'; +import { Layout } from 'components'; +import useStore from 'store'; +import UserManagement from 'pages/UserManagement'; +import { useQuery } from '@tanstack/react-query'; +import { UserInfo } from 'types/userInfo'; +import { authQueryKeys } from 'utils/queryKeys'; +import { ROLES } from 'enums/roles'; +import LoadingScreen from 'pages/LoadingScreen/LoadingScreen'; +import Unauthorized from 'pages/Unauthorized/unauthorized'; + +const App: FC = () => { + const navigate = 
useNavigate(); + const location = useLocation(); + const [hasRedirected, setHasRedirected] = useState(false); + const { isLoading, data } = useQuery({ + queryKey: authQueryKeys.USER_DETAILS(), + + onSuccess: (res: { response: UserInfo }) => { + localStorage.setItem('exp', res.response.JWTExpirationTimestamp); + useStore.getState().setUserInfo(res.response); + }, + }); + + useEffect(() => { + if (!isLoading && data && !hasRedirected && location.pathname === '/') { + const isAdmin = (data as { response: UserInfo }).response.authorities.some( + (item) => item === ROLES.ROLE_ADMINISTRATOR + ); + if (isAdmin) { + navigate('/user-management'); + } else { + navigate('/dataset-groups'); + } + setHasRedirected(true); + } + }, [isLoading, data, navigate, hasRedirected, location.pathname]); + + return ( + <> + {isLoading ? ( + + ) : ( + + }> + {(data as { response: UserInfo })?.response.authorities.some( + (item) => item === ROLES.ROLE_ADMINISTRATOR + ) ? ( + <> + } /> + + ) : ( + <> + } /> + } /> + + )} + + + + )} + + ); +}; + +export default App; diff --git a/GUI/src/assets/BackArrowButton.tsx b/GUI/src/assets/BackArrowButton.tsx new file mode 100644 index 00000000..e8e60eb8 --- /dev/null +++ b/GUI/src/assets/BackArrowButton.tsx @@ -0,0 +1,31 @@ +const BackArrowButton = () => { + return ( + + + + + + + + + + + + ); +}; + +export default BackArrowButton; diff --git a/GUI/src/assets/DataModelsIcon.tsx b/GUI/src/assets/DataModelsIcon.tsx new file mode 100644 index 00000000..855dd73f --- /dev/null +++ b/GUI/src/assets/DataModelsIcon.tsx @@ -0,0 +1,20 @@ +import React from 'react'; + +const DataModelsIcon = () => { + return ( + + + + ); +}; + +export default DataModelsIcon; diff --git a/GUI/src/assets/DatabaseIcon.tsx b/GUI/src/assets/DatabaseIcon.tsx new file mode 100644 index 00000000..5ab9d3b5 --- /dev/null +++ b/GUI/src/assets/DatabaseIcon.tsx @@ -0,0 +1,37 @@ +import React from 'react'; + +const DatabaseIcon = () => { + return ( + + + + + + ); +}; + +export default DatabaseIcon; diff --git a/GUI/src/assets/Dataset.tsx b/GUI/src/assets/Dataset.tsx new file mode 100644 index 00000000..6b46aff4 --- /dev/null +++ b/GUI/src/assets/Dataset.tsx @@ -0,0 +1,18 @@ +const Dataset = () => { + return ( + + + + ); +}; + +export default Dataset; diff --git a/GUI/src/assets/IncomingTextsIcon.tsx b/GUI/src/assets/IncomingTextsIcon.tsx new file mode 100644 index 00000000..fb6ccb9d --- /dev/null +++ b/GUI/src/assets/IncomingTextsIcon.tsx @@ -0,0 +1,20 @@ +import React from 'react'; + +const IncomingTextsIcon = () => { + return ( + + + + ); +}; + +export default IncomingTextsIcon; diff --git a/GUI/src/assets/IntegrationIcon.tsx b/GUI/src/assets/IntegrationIcon.tsx new file mode 100644 index 00000000..5553ea54 --- /dev/null +++ b/GUI/src/assets/IntegrationIcon.tsx @@ -0,0 +1,42 @@ +import React from 'react'; + +const IntegrationIcon = () => { + return ( + + + + + + + + + + + + ); +}; + +export default IntegrationIcon; diff --git a/GUI/src/assets/Jira.tsx b/GUI/src/assets/Jira.tsx new file mode 100644 index 00000000..37088791 --- /dev/null +++ b/GUI/src/assets/Jira.tsx @@ -0,0 +1,55 @@ +const Jira = () => { + return ( + + + + + + + + + + + + + + + + + + + + + + ); +}; +export default Jira; diff --git a/GUI/src/assets/Outlook.tsx b/GUI/src/assets/Outlook.tsx new file mode 100644 index 00000000..5eb0ebbc --- /dev/null +++ b/GUI/src/assets/Outlook.tsx @@ -0,0 +1,25 @@ +const Outlook = () => { + return ( + + + + + + ); +}; +export default Outlook; diff --git a/GUI/src/assets/TestModelIcon.tsx 
b/GUI/src/assets/TestModelIcon.tsx
new file mode 100644
index 00000000..6b9c45f6
--- /dev/null
+++ b/GUI/src/assets/TestModelIcon.tsx
@@ -0,0 +1,32 @@
+import React from 'react';
+
+const TestModelIcon = () => {
+  return (
+
+
+
+
+
+
+
+
+
+
+  );
+};
+
+export default TestModelIcon;
diff --git a/GUI/src/assets/UserIcon.tsx b/GUI/src/assets/UserIcon.tsx
new file mode 100644
index 00000000..83c84c0c
--- /dev/null
+++ b/GUI/src/assets/UserIcon.tsx
@@ -0,0 +1,30 @@
+import React from 'react';
+
+const UserIcon = () => {
+  return (
+
+
+
+
+  );
+};
+
+export default UserIcon;
diff --git a/GUI/src/assets/logo-white.svg b/GUI/src/assets/logo-white.svg
new file mode 100644
index 00000000..20257361
--- /dev/null
+++ b/GUI/src/assets/logo-white.svg
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/GUI/src/assets/logo.svg b/GUI/src/assets/logo.svg
new file mode 100644
index 00000000..6039e9b5
--- /dev/null
+++ b/GUI/src/assets/logo.svg
@@ -0,0 +1,31 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/GUI/src/assets/newMessageSound.mp3 b/GUI/src/assets/newMessageSound.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..9400b22a4e90a798f02bf1eebccb9355536fbe67
GIT binary patch
literal 20942
[base85-encoded binary data omitted]
zK?3E--Nb^39SJUplqj-jkohqOH9qN*G=h(EhCy8|AVkDUJ3xei2#Hf?MR* zc$>MFMB?`mM5u(Ni%`POmSXJ&Q#8?3#jP%us%W5wq_bBT!nOo7FNQ^sz8 zUS)*6a}!8+`S9{Qj}}C^2{Zhqm4vct62?uNeTI$*%aF5BpJf^thU$3NG`80`VVb6C zO6Vw>5;P}jO%END#sCoq@Bk0OhUTwHRRb&?HgMoI_i?olfiqgAGn|dF*tucG7#u-D zf#V?}m<}$lmN^Mk1lW}7_zw{rAG&|(S0zz(Wi>_y5Gp05AcTQWG82pvCb>(Rq!^Vn zc1@GzZl~LP7m@@sDp8~)h?pQ~&AQkW3I(A70?5&_O}cWW1c#225D1JJ5Xe%KE){Dr zdj`nfu?jpOC?+2JD4ZP3GOe0|4Vetjg`M@+CODuI&}{E5T46iG9MIKHZcT5G+6 z$5mRCL~@ug;s?HxWwy~S)-@F%q8gIg+k}Dp%U5boqyuV7dE2wU7@2Yl?EUw(-dO@R zqeeB3D~}I6ma3z@V2xVxY-`j}WGy3B%9kvh{Kp=;0ZI*8y(}`dv#S_O9`e@Ai0vFg z^J^}eiqaC;D;JyXZmAl*ocKiawg$z$)H8R)7+HkNX6nT`jgHJVfvD@1l=XTEylI|-LXHAPsDhVOmP(fW^TI^kGnQbbcE$^_9;X_*PSOh2 z`8sZPFEH9DkFJ&SsU_#bGj11awP3_2{+lvTenSDIU1J%v{GR$hi&S;S$6Zn zH7Bi=NX1-c_cuc?uW)o<3va9+6rK{{Fh+db2Sxun^yPNXF4F%C literal 0 HcmV?d00001 diff --git a/GUI/src/components/Box/Box.scss b/GUI/src/components/Box/Box.scss new file mode 100644 index 00000000..8801c053 --- /dev/null +++ b/GUI/src/components/Box/Box.scss @@ -0,0 +1,56 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.box { + padding: get-spacing(paldiski); + border-radius: 4px; + border: 1px solid; + font-size: $veera-font-size-100; + line-height: $veera-line-height-500; + + &:hover { + cursor: grab; + } + + &--default { + background-color: get-color(black-coral-1); + border-color: get-color(black-coral-3); + } + + &--blue { + background-color: get-color(sapphire-blue-0); + border-color: get-color(sapphire-blue-2); + } + + &--yellow { + background-color: get-color(dark-tangerine-0); + border-color: get-color(dark-tangerine-4); + } + + &--green { + background-color: get-color(sea-green-1); + border-color: get-color(sea-green-3); + } + + &--red { + background-color: get-color(jasper-1); + border-color: get-color(jasper-3); + } + + &--gray { + background-color: get-color(black-coral-1); + border-color: get-color(black-coral-3); + } + + &--dark-blue { + background-color: get-color(sapphire-blue-3); + border-color: get-color(sapphire-blue-5); + } + + &--orange { + background-color: get-color(orange-3); + border-color: get-color(orange-5); + } +} diff --git a/GUI/src/components/Box/index.tsx b/GUI/src/components/Box/index.tsx new file mode 100644 index 00000000..df4d3992 --- /dev/null +++ b/GUI/src/components/Box/index.tsx @@ -0,0 +1,16 @@ +import { forwardRef, PropsWithChildren } from 'react'; +import clsx from 'clsx'; + +import './Box.scss'; + +type BoxProps = { + color?: 'default' | 'blue' | 'yellow' | 'green' | 'red' | 'gray' | 'dark-blue' | 'orange'; +} + +const Box = forwardRef>(({ color = 'default', children }, ref) => { + return ( +
{children}
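+    /* Usage sketch (assumes Box is re-exported through the 'components'
+       barrel like its siblings; the color and content are illustrative):
+
+         import { Box } from 'components';
+
+         <Box color="yellow">Pending labelling task</Box>
+
+       The grab cursor in Box.scss suggests these boxes double as drag
+       handles, which is why a ref can be attached via forwardRef. */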
+ ); +}); + +export default Box; diff --git a/GUI/src/components/Button/Button.scss b/GUI/src/components/Button/Button.scss new file mode 100644 index 00000000..fc21cab1 --- /dev/null +++ b/GUI/src/components/Button/Button.scss @@ -0,0 +1,151 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.btn { + $self: &; + appearance: none; + display: inline-flex; + align-items: center; + background: none; + border: 0; + color: get-color(black-coral-0); + cursor: pointer; + font: inherit; + gap: get-spacing(rapla); + overflow: visible; + padding: 8px 40px; + text-decoration: none; + font-size: $veera-font-size-100; + line-height: 24px; + border-radius: 20px; + white-space: nowrap; + height: fit-content; + + &:focus { + outline: none; + } + + &--disabled { + cursor: not-allowed; + } + + &--primary { + background-color: get-color(sapphire-blue-10); + + &:hover, + &:active { + background-color: get-color(sapphire-blue-13); + } + + &:focus { + box-shadow: inset 0 0 0 2px get-color(sapphire-blue-3) + } + + &#{$self}--disabled { + background-color: get-color(black-coral-2); + color: get-color(white); + } + } + + &--secondary { + background-color: get-color(white); + box-shadow: inset 0 0 0 2px get-color(black-coral-10); + color: get-color(black-coral-15); + + &:hover, + &:active { + box-shadow: inset 0 0 0 2px get-color(black-coral-2); + } + + &:focus { + box-shadow: inset 0 0 0 2px get-color(sapphire-blue-10); + } + + &#{$self}--disabled { + background-color: get-color(black-coral-2); + color: get-color(black-coral-6); + box-shadow: inset 0 0 0 2px get-color(black-coral-2); + } + } + + &--text { + padding: 0; + background: none; + color: get-color(sapphire-blue-10); + gap: 4px; + border-radius: 0; + + &:hover, + &:active { + text-decoration: underline; + } + + &:focus { + box-shadow: inset 0 0 0 2px get-color(sapphire-blue-10); + } + + &#{$self}--disabled { + color: get-color(black-coral-6); + } + } + + &--icon { + width: 36px; + height: 36px; + padding: 0; + justify-content: center; + color: get-color(black-coral-10); + font-size: 24px; + + &:hover, + &:active { + color: get-color(sapphire-blue-10); + } + + &:focus { + color: get-color(sapphire-blue-10); + box-shadow: inset 0 0 0 2px get-color(sapphire-blue-10); + } + } + + &--error { + background-color: get-color(jasper-10); + + &:hover, + &:active { + background-color: get-color(jasper-12); + } + + &:focus { + box-shadow: inset 0 0 0 2px get-color(jasper-13); + } + + &#{$self}--disabled { + background-color: get-color(black-coral-2); + } + } + + &--success { + background-color: get-color(sea-green-10); + + &:hover, + &:active { + background-color: get-color(sea-green-12); + } + + &:focus { + background-color: get-color(sea-green-10); + box-shadow: inset 0 0 0 2px get-color(sea-green-12); + } + + &#{$self}--disabled { + background-color: get-color(black-coral-2); + } + } + + &--s { + padding: 4.5px 24px; + } +} diff --git a/GUI/src/components/Button/index.tsx b/GUI/src/components/Button/index.tsx new file mode 100644 index 00000000..b35cd8c3 --- /dev/null +++ b/GUI/src/components/Button/index.tsx @@ -0,0 +1,56 @@ +import { ButtonHTMLAttributes, FC, PropsWithChildren, useRef } from 'react'; +import clsx from 'clsx'; + +import './Button.scss'; + +type ButtonProps = ButtonHTMLAttributes & { + appearance?: 'primary' | 'secondary' | 'text' | 'icon' | 'error' | 'success'; + size?: 'm' | 's'; + disabledWithoutStyle?: boolean; + 
showLoadingIcon?: boolean; +}; + +const Button: FC> = ({ + appearance = 'primary', + size = 'm', + disabled, + disabledWithoutStyle = false, + children, + showLoadingIcon = false, + ...rest +}) => { + const ref = useRef(null); + + const buttonClasses = clsx( + 'btn', + `btn--${appearance}`, + `btn--${size}`, + disabled && 'btn--disabled' + ); + + return ( + + ); +}; + +export default Button; \ No newline at end of file diff --git a/GUI/src/components/Card/Card.scss b/GUI/src/components/Card/Card.scss new file mode 100644 index 00000000..82d2665c --- /dev/null +++ b/GUI/src/components/Card/Card.scss @@ -0,0 +1,65 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.card { + $self: &; + background-color: get-color(white); + border: 1px solid get-color(black-coral-2); + border-radius: $veera-radius-s; + margin-bottom: 10px; + + &--borderless { + border: 0; + border-radius: 0; + + #{$self}__header { + border-radius: 0; + } + } + + &--fullWidth { + width: 100%; + } + + &__header, + &__body, + &__footer { + padding: get-spacing(haapsalu); + } + + &__header { + border-bottom: 1px solid get-color(black-coral-2); + background-color: #F9F9F9; + border-radius: $veera-radius-s $veera-radius-s 0 0; + + &.white { + background-color: white + } + } + + &__body { + &.divided { + display: flex; + flex-direction: column; + padding-left: 0px; + padding-right: 0px; + + > :not(:last-child) { + margin-bottom: get-spacing(haapsalu); + border-bottom: 1px solid get-color(black-coral-2); + padding-bottom: get-spacing(haapsalu); + padding-left: get-spacing(haapsalu); + } + + > :is(:last-child) { + padding-left: get-spacing(haapsalu); + } + } + } + + &__footer { + border-top: 1px solid get-color(black-coral-2); + } +} diff --git a/GUI/src/components/Card/index.tsx b/GUI/src/components/Card/index.tsx new file mode 100644 index 00000000..27eb7501 --- /dev/null +++ b/GUI/src/components/Card/index.tsx @@ -0,0 +1,39 @@ +import { FC, PropsWithChildren, ReactNode } from 'react'; +import clsx from 'clsx'; + +import './Card.scss'; + +type CardProps = { + header?: ReactNode; + footer?: ReactNode; + borderless?: boolean; + isHeaderLight?: boolean; + isBodyDivided?: boolean; + isFullWidth?: boolean; +}; + +const Card: FC> = ({ + header, + footer, + borderless, + isHeaderLight, + isBodyDivided, + children, + isFullWidth, +}) => { + return ( +
+    <div className={clsx('card', borderless && 'card--borderless', isFullWidth && 'card--fullWidth')}>
+      {header && (
+        <div className={clsx('card__header', isHeaderLight && 'white')}>
+          {header}
+        </div>
+      )}
+      <div className={clsx('card__body', isBodyDivided && 'divided')}>
+        {children}
+      </div>
+      {footer && <div className="card__footer">{footer}</div>}
+    </div>
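+    /* Usage sketch (assumes Card comes from the 'components' barrel; the
+       header and footer nodes are illustrative):
+
+         <Card isFullWidth
+               header={<h2>Validation sessions</h2>}
+               footer={<Button>View all</Button>}>
+           <p>Card body</p>
+         </Card> */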
+ ); +}; + +export default Card; diff --git a/GUI/src/components/Collapsible/Collapsible.scss b/GUI/src/components/Collapsible/Collapsible.scss new file mode 100644 index 00000000..24328e65 --- /dev/null +++ b/GUI/src/components/Collapsible/Collapsible.scss @@ -0,0 +1,35 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.collapsible { + border: 1px solid get-color(black-coral-2); + border-radius: 4px; + + &__trigger { + width: 100%; + display: flex; + align-items: center; + gap: 4px; + padding: get-spacing(haapsalu); + background-color: get-color(extra-light); + border-radius: 4px; + + &[aria-expanded=true] { + border-bottom: 1px solid get-color(black-coral-2); + border-radius: 4px 4px 0 0; + } + + .icon { + font-size: 21px; + } + } + + &__content { + padding: get-spacing(haapsalu); + background-color: get-color(white); + border-radius: 0 0 4px 4px; + overflow: hidden; + } +} diff --git a/GUI/src/components/Collapsible/index.tsx b/GUI/src/components/Collapsible/index.tsx new file mode 100644 index 00000000..02a13bda --- /dev/null +++ b/GUI/src/components/Collapsible/index.tsx @@ -0,0 +1,31 @@ +import { FC, PropsWithChildren, useState } from 'react'; +import * as RadixCollapsible from '@radix-ui/react-collapsible'; +import { MdOutlineAddBox, MdOutlineIndeterminateCheckBox } from 'react-icons/md'; + +import { Icon } from 'components'; +import './Collapsible.scss'; + +type CollapsibleProps = { + title: string; + defaultOpen?: boolean; +} + +const Collapsible: FC> = ({ defaultOpen = false, title, children }) => { + const [open, setOpen] = useState(defaultOpen); + + return ( + + + + + + {children} + + + ); +}; + +export default Collapsible; diff --git a/GUI/src/components/DataTable/CloseIcon.tsx b/GUI/src/components/DataTable/CloseIcon.tsx new file mode 100644 index 00000000..85de2dbb --- /dev/null +++ b/GUI/src/components/DataTable/CloseIcon.tsx @@ -0,0 +1,22 @@ +import React from 'react'; +import './DeboucedInput.scss'; + +const CloseIcon: React.FC = () => ( + + + + +); + +export default CloseIcon; diff --git a/GUI/src/components/DataTable/DataTable.scss b/GUI/src/components/DataTable/DataTable.scss new file mode 100644 index 00000000..fcf13b37 --- /dev/null +++ b/GUI/src/components/DataTable/DataTable.scss @@ -0,0 +1,197 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/typography'; + +.data-table { + width: 100%; + color: get-color(black-coral-20); + text-align: left; + margin-bottom: 0; + display: table; + + &__scrollWrapper { + height: 100%; + min-height: 150px !important; + padding: 10px 20px; + overflow-x: auto; + white-space: nowrap; + display: block; + background-color: white; + border-radius: 10px; + border: solid 1px get-color(black-coral-1); + } + + thead, + tbody { + width: 100%; + } + + th { + padding: 12px 14.5px; + color: get-color(black-coral-12); + border-bottom: 1px solid get-color(black-coral-10); + font-weight: $veera-font-weight-beta; + vertical-align: middle; + position: relative; + } + + td { + padding: 12px 24px 12px 16px; + border-bottom: 1px solid get-color(black-coral-2); + vertical-align: middle; + max-width: fit-content; + + p { + white-space: break-spaces; + } + + .entity { + display: inline-flex; + align-items: center; + padding-left: 4px; + background-color: get-color(sapphire-blue-2); + border-radius: 4px; + + span { + display: inline-flex; + font-size: 
$veera-font-size-80; + background-color: get-color(white); + padding: 0 4px; + border-radius: 4px; + margin: 2px 2px 2px 4px; + } + } + } + + tbody { + tr { + &:last-child { + td { + border-bottom: 0; + } + } + } + } + + &__filter { + position: absolute; + top: 100%; + left: 0; + right: 0; + padding: get-spacing(paldiski); + background-color: get-color(white); + box-shadow: 0 4px 10px rgba(0, 0, 0, 0.14); + border-radius: 0 0 4px 4px; + border: 1px solid get-color(black-coral-2); + + input { + width: 100%; + display: block; + appearance: none; + background-color: get-color(white); + border: 1px solid get-color(black-coral-6); + border-radius: 5px; + color: var(--color-black); + font-size: $veera-font-size-100; + height: 32px; + line-height: 24px; + padding: get-spacing(paldiski); + + &::placeholder { + color: get-color(black-coral-6); + } + + &:focus { + outline: none; + border-color: get-color(sapphire-blue-10); + } + } + } + + &__pagination-wrapper { + margin-top: 10px; + display: flex; + padding: 6px 16px; + } + + &__pagination { + display: flex; + align-items: center; + gap: 15px; + margin: 0 auto; + + + .data-table__page-size { + margin-left: 0; + } + + .next, + .previous { + display: flex; + color: get-color(sapphire-blue-10); + + &[disabled] { + color: get-color(black-coral-11); + cursor: initial; + } + } + + .links { + display: flex; + align-items: center; + gap: 5px; + font-size: $veera-font-size-80; + color: get-color(black-coral-10); + + li { + display: block; + + a, + span { + display: flex; + align-items: center; + justify-content: center; + width: 25px; + height: 25px; + border-radius: 50%; + + &:hover { + text-decoration: none; + } + } + + &.active { + a, + span { + color: get-color(white); + background-color: get-color(sapphire-blue-10); + } + } + } + } + } + + &__page-size { + display: flex; + align-items: center; + gap: 8px; + font-size: $veera-font-size-80; + line-height: 16px; + color: get-color(black-coral-11); + margin-left: auto; + + select { + appearance: none; + font-size: $veera-font-size-70; + line-height: 16px; + height: 30px; + min-width: 50px; + padding: 6px 10px; + border: 1px solid #8f91a8; + border-radius: 2px; + background-color: get-color(white); + background-image: url('data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTAiIGhlaWdodD0iNiIgdmlld0JveD0iMCAwIDEwIDYiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxwYXRoIGZpbGwtcnVsZT0iZXZlbm9kZCIgY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNNS4zMTMwNiA1LjgwODIyQzUuMTU2ODUgNS45NjQ0MyA0LjkwMzU4IDUuOTY0NDMgNC43NDczNyA1LjgwODIyTDAuMjgyNzMgMS4zNDM1OEMwLjEyNjUyIDEuMTg3MzcgMC4xMjY1MiAwLjkzNDEwMiAwLjI4MjczIDAuNzc3ODkzTDAuNzc3NzA0IDAuMjgyOTE4QzAuOTMzOTE0IDAuMTI2NzA4IDEuMTg3MTggMC4xMjY3MDggMS4zNDMzOSAwLjI4MjkxN0w1LjAzMDIyIDMuOTY5NzRMOC43MTcwNCAwLjI4MjkxN0M4Ljg3MzI1IDAuMTI2NzA4IDkuMTI2NTIgMC4xMjY3MDggOS4yODI3MyAwLjI4MjkxN0w5Ljc3NzcgMC43Nzc4OTJDOS45MzM5MSAwLjkzNDEwMiA5LjkzMzkxIDEuMTg3MzcgOS43Nzc3IDEuMzQzNThMNS4zMTMwNiA1LjgwODIyWiIgZmlsbD0iIzU1NTg2NyIvPgo8L3N2Zz4K'); + background-repeat: no-repeat; + background-position: top 11px right 10px; + } + } +} diff --git a/GUI/src/components/DataTable/DeboucedInput.scss b/GUI/src/components/DataTable/DeboucedInput.scss new file mode 100644 index 00000000..753f1ad0 --- /dev/null +++ b/GUI/src/components/DataTable/DeboucedInput.scss @@ -0,0 +1,11 @@ +.input-container { + position: relative; +} + +.search-icon { + position: absolute; + top: 50%; + right: 10px; + margin-left: 10px; + transform: translateY(-50%); +} diff --git a/GUI/src/components/DataTable/DebouncedInput.tsx 
b/GUI/src/components/DataTable/DebouncedInput.tsx new file mode 100644 index 00000000..1ad1f52f --- /dev/null +++ b/GUI/src/components/DataTable/DebouncedInput.tsx @@ -0,0 +1,54 @@ +import { FC, InputHTMLAttributes, useEffect, useState } from 'react'; +import './DeboucedInput.scss'; +import CloseIcon from './CloseIcon'; + +type DebouncedInputProps = Omit< + InputHTMLAttributes, + 'onChange' +> & { + value: string | number | string[]; + onChange: (value: string | number | string[]) => void; + debounce?: number; +}; + +const DebouncedInput: FC = ({ + value: initialValue, + onChange, + debounce = 500, + ...props +}) => { + const [value, setValue] = useState(initialValue); + + useEffect(() => { + setValue(initialValue); + }, [initialValue]); + + useEffect(() => { + const timeout = setTimeout(() => { + onChange(value); + }, debounce); + + return () => clearTimeout(timeout); + }, [value]); + + return ( +
+    <div className="input-container">
+      <input
+        {...props}
+        value={value}
+        onChange={(e) => setValue(e.target.value)}
+      />
+      {value && (
+        /* clear control (assumed): a button wrapping CloseIcon that resets the field */
+        <button className="search-icon" onClick={() => setValue('')}>
+          <CloseIcon />
+        </button>
+      )}
+    </div>
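+    /* Usage sketch: the input is controlled locally and only propagates
+       upstream after the debounce window (500 ms by default); names are
+       illustrative:
+
+         <DebouncedInput value={globalFilter ?? ''} debounce={300}
+           onChange={(v) => setGlobalFilter(String(v))} /> */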
+ ); +}; + +export default DebouncedInput; diff --git a/GUI/src/components/DataTable/Filter.tsx b/GUI/src/components/DataTable/Filter.tsx new file mode 100644 index 00000000..038d8118 --- /dev/null +++ b/GUI/src/components/DataTable/Filter.tsx @@ -0,0 +1,65 @@ +import React, { FC, useState, MouseEvent } from 'react'; +import { Column, Table } from '@tanstack/react-table'; +import { useTranslation } from 'react-i18next'; +import { MdOutlineSearch } from 'react-icons/md'; + +import { Icon } from 'components'; +import useDocumentEscapeListener from 'hooks/useDocumentEscapeListener'; +import DebouncedInput from './DebouncedInput'; + +type FilterProps = { + column: Column; + table: Table; +}; + +const Filter: FC = ({ column, table }) => { + const { t } = useTranslation(); + const [filterOpen, setFilterOpen] = useState(false); + const firstValue = table + .getPreFilteredRowModel() + .flatRows[0]?.getValue(column.id); + + const columnFilterValue = column.getFilterValue(); + + useDocumentEscapeListener(() => setFilterOpen(false)); + + const handleFilterToggle = (e: MouseEvent) => { + e.stopPropagation(); + setFilterOpen(!filterOpen); + }; + + return ( + <> + + {filterOpen && ( +
+        <div className="data-table__filter">
+          {typeof firstValue === 'number' ? (
+            /* numeric column (assumed): edits the lower bound of a [min, max] range */
+            <DebouncedInput
+              type="number"
+              value={(columnFilterValue as [number, number])?.[0] ?? ''}
+              onChange={(value) =>
+                column.setFilterValue((old: [number, number]) => [
+                  value,
+                  old?.[1],
+                ])
+              }
+            />
+          ) : (
+            <DebouncedInput
+              value={(columnFilterValue ?? '') as string}
+              onChange={(value) => column.setFilterValue(value)}
+              placeholder={t('global.search') + '...'}
+            />
+          )}
+        </div>
+ )} + + ); +}; + +export default Filter; diff --git a/GUI/src/components/DataTable/index.tsx b/GUI/src/components/DataTable/index.tsx new file mode 100644 index 00000000..8f748721 --- /dev/null +++ b/GUI/src/components/DataTable/index.tsx @@ -0,0 +1,242 @@ +import React, { CSSProperties, FC, ReactNode, useId } from 'react'; +import { + ColumnDef, + useReactTable, + getCoreRowModel, + flexRender, + getSortedRowModel, + SortingState, + FilterFn, + getFilteredRowModel, + VisibilityState, + getPaginationRowModel, + PaginationState, + TableMeta, + Row, + RowData, ColumnFiltersState, +} from '@tanstack/react-table'; +import { + RankingInfo, + rankItem, +} from '@tanstack/match-sorter-utils'; +import { + MdUnfoldMore, + MdExpandMore, + MdExpandLess, + MdOutlineEast, + MdOutlineWest, +} from 'react-icons/md'; +import clsx from 'clsx'; +import { Link } from 'react-router-dom'; +import { useTranslation } from 'react-i18next'; + +import { Icon, Track } from 'components'; +import Filter from './Filter'; +import './DataTable.scss'; + +type DataTableProps = { + data: any; + columns: ColumnDef[]; + tableBodyPrefix?: ReactNode; + isClientSide?: boolean; + sortable?: boolean; + filterable?: boolean; + pagination?: PaginationState; + sorting?: SortingState; + setPagination?: (state: PaginationState) => void; + setSorting?: (state: SortingState) => void; + globalFilter?: string; + setGlobalFilter?: React.Dispatch>; + columnVisibility?: VisibilityState; + setColumnVisibility?: React.Dispatch>; + disableHead?: boolean; + pagesCount?: number; + meta?: TableMeta; +}; + +type ColumnMeta = { + meta: { + size: number | string; + } +} + +type CustomColumnDef = ColumnDef & ColumnMeta; + +declare module '@tanstack/table-core' { + interface FilterFns { + fuzzy: FilterFn; + } + + interface FilterMeta { + itemRank: RankingInfo; + } +} + +declare module '@tanstack/react-table' { + interface TableMeta { + getRowStyles: (row: Row) => CSSProperties; + } + class Column { + columnDef: CustomColumnDef; + } +} + +const fuzzyFilter: FilterFn = (row, columnId, value, addMeta) => { + const itemRank = rankItem(row.getValue(columnId), value); + addMeta({ + itemRank, + }); + return itemRank.passed; +}; + +const DataTable: FC = ( + { + data, + columns, + isClientSide = true, + tableBodyPrefix, + sortable, + filterable, + pagination, + sorting, + setPagination, + setSorting, + globalFilter, + setGlobalFilter, + columnVisibility, + setColumnVisibility, + disableHead, + pagesCount, + meta, + }, +) => { + const id = useId(); + const { t } = useTranslation(); + const [columnFilters, setColumnFilters] = React.useState([]); + const table = useReactTable({ + data, + columns, + filterFns: { + fuzzy: fuzzyFilter, + }, + state: { + sorting, + columnFilters, + globalFilter, + columnVisibility, + ...{ pagination }, + }, + meta, + onColumnFiltersChange: setColumnFilters, + onGlobalFilterChange: setGlobalFilter, + onColumnVisibilityChange: setColumnVisibility, + globalFilterFn: fuzzyFilter, + onSortingChange: (updater) => { + if (typeof updater !== 'function') return; + setSorting?.(updater(table.getState().sorting)); + }, + onPaginationChange: (updater) => { + if (typeof updater !== 'function') return; + setPagination?.(updater(table.getState().pagination)); + }, + getCoreRowModel: getCoreRowModel(), + getFilteredRowModel: getFilteredRowModel(), + ...(pagination && { getPaginationRowModel: getPaginationRowModel() }), + ...(sortable && { getSortedRowModel: getSortedRowModel() }), + manualPagination: isClientSide ? 
undefined : true, + manualSorting: isClientSide ? undefined : true, + pageCount: isClientSide ? undefined : pagesCount, + }); + + return ( +
+    <div className="data-table__scrollWrapper">
+      <table className="data-table">
+        {!disableHead && (
+          <thead>
+            {table.getHeaderGroups().map((headerGroup) => (
+              <tr key={headerGroup.id}>
+                {headerGroup.headers.map((header) => (
+                  <th key={header.id} style={{ width: header.column.columnDef.meta?.size }}>
+                    {header.isPlaceholder ? null : (
+                      <Track gap={8}>
+                        {sortable && header.column.getCanSort() && (
+                          /* sort toggle: icon mapping assumed from the imported icons */
+                          <button onClick={header.column.getToggleSortingHandler()}>
+                            {{
+                              asc: <Icon icon={<MdExpandMore />} size="medium" />,
+                              desc: <Icon icon={<MdExpandLess />} size="medium" />,
+                            }[header.column.getIsSorted() as string] ?? (
+                              <Icon icon={<MdUnfoldMore />} size="medium" />
+                            )}
+                          </button>
+                        )}
+                        {flexRender(header.column.columnDef.header, header.getContext())}
+                        {filterable && header.column.getCanFilter() && (
+                          <Filter column={header.column} table={table} />
+                        )}
+                      </Track>
+                    )}
+                  </th>
+                ))}
+              </tr>
+            ))}
+          </thead>
+        )}
+        <tbody>
+          {tableBodyPrefix}
+          {table.getRowModel().rows.map((row) => (
+            <tr key={row.id} style={meta?.getRowStyles(row)}>
+              {row.getVisibleCells().map((cell) => (
+                <td key={cell.id}>{flexRender(cell.column.columnDef.cell, cell.getContext())}</td>
+              ))}
+            </tr>
+          ))}
+        </tbody>
+      </table>
+      {pagination && (
+        <div className="data-table__pagination-wrapper">
+          {(table.getPageCount() * table.getState().pagination.pageSize) > table.getState().pagination.pageSize && (
+            <div className="data-table__pagination">
+              <button
+                className="previous"
+                onClick={() => table.previousPage()}
+                disabled={!table.getCanPreviousPage()}
+              >
+                <MdOutlineWest />
+              </button>
+              <ul className="links">
+                {/* page links reconstructed; the original link targets were lost,
+                    so anchors driving setPageIndex are assumed */}
+                {Array.from({ length: table.getPageCount() }, (_, i) => (
+                  <li key={i} className={clsx({ active: table.getState().pagination.pageIndex === i })}>
+                    <Link to="#" onClick={() => table.setPageIndex(i)}>
+                      {i + 1}
+                    </Link>
+                  </li>
+                ))}
+              </ul>
+              <button
+                className="next"
+                onClick={() => table.nextPage()}
+                disabled={!table.getCanNextPage()}
+              >
+                <MdOutlineEast />
+              </button>
+            </div>
+          )}
+          <div className="data-table__page-size">
+            {/* page-size selector assumed from the accompanying SCSS; i18n key assumed */}
+            <label htmlFor={id}>{t('global.resultCount')}</label>
+            <select
+              id={id}
+              value={table.getState().pagination.pageSize}
+              onChange={(e) => table.setPageSize(Number(e.target.value))}
+            >
+              {[10, 20, 30, 40, 50].map((pageSize) => (
+                <option key={pageSize} value={pageSize}>
+                  {pageSize}
+                </option>
+              ))}
+            </select>
+          </div>
+        </div>
+      )}
+    </div>
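+    /* Usage sketch (assumed row type and columns; names illustrative):
+
+         const columns: ColumnDef<Dataset>[] = [
+           { accessorKey: 'name', header: 'Name' },
+         ];
+         <DataTable data={datasets} columns={columns} sortable filterable />
+
+       With isClientSide left true, sorting/filtering/pagination run in the
+       browser; pass pagination/sorting state and pagesCount for server mode. */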
+ ); +}; + +export default DataTable; diff --git a/GUI/src/components/Dialog/Dialog.scss b/GUI/src/components/Dialog/Dialog.scss new file mode 100644 index 00000000..bc67c6e0 --- /dev/null +++ b/GUI/src/components/Dialog/Dialog.scss @@ -0,0 +1,63 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.dialog { + background-color: get-color(white); + box-shadow: 0 0 20px rgba(0, 0, 0, 0.25); + border-radius: 4px; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + width: 100%; + max-width: 600px; + z-index: 1011111; + max-height: 90vh; + + &--large { + max-width: 800px; + } + + &__overlay { + position: fixed; + inset: 0; + background-color: rgba(0, 0, 0, 0.54); + z-index: 100; + } + + &__header, + &__body, + &__footer { + padding: get-spacing(haapsalu); + } + + &__header { + display: flex; + align-items: center; + gap: get-spacing(haapsalu); + background-color: get-color(black-coral-0); + border-bottom: 1px solid get-color(black-coral-2); + } + + &__title { + flex: 1; + } + + &__close { + display: flex; + align-items: center; + justify-content: center; + font-size: 20px; + } + + &__body { + overflow: auto; + max-height: calc(90vh - 70px); + } + + &__footer { + border-top: 1px solid get-color(black-coral-2); + } +} diff --git a/GUI/src/components/Dialog/index.tsx b/GUI/src/components/Dialog/index.tsx new file mode 100644 index 00000000..7b2848c1 --- /dev/null +++ b/GUI/src/components/Dialog/index.tsx @@ -0,0 +1,45 @@ +import { FC, PropsWithChildren, ReactNode } from 'react'; +import * as RadixDialog from '@radix-ui/react-dialog'; +import { MdOutlineClose } from 'react-icons/md'; +import clsx from 'clsx'; +import './Dialog.scss'; +import Icon from 'components/Icon'; +import Track from 'components/Track'; + +type DialogProps = { + title?: string | null; + footer?: ReactNode; + onClose: () => void; + size?: 'default' | 'large'; + isOpen?: boolean; +} + +const Dialog: FC> = ({ title, footer, onClose, size = 'default', children,isOpen }) => { + return ( + + + + + { + title &&
+          /* header markup reconstructed from Dialog.scss class names */
+          <div className="dialog__header">
+            <RadixDialog.Title className="h3 dialog__title">{title}</RadixDialog.Title>
+            <RadixDialog.Close asChild>
+              <button className="dialog__close" onClick={onClose}>
+                <Icon icon={<MdOutlineClose />} size="medium" />
+              </button>
+            </RadixDialog.Close>
+          </div>
+        }
+        <div className="dialog__body">
+          {children}
+        </div>
+        {footer && (
+          <div className="dialog__footer">{footer}</div>
+        )}
+      </RadixDialog.Content>
+    </RadixDialog.Portal>
+  </RadixDialog.Root>
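+  /* Usage sketch (assumes Dialog comes from the 'components' barrel):
+
+       <Dialog title="Delete dataset" isOpen={open}
+         onClose={() => setOpen(false)}
+         footer={<Button appearance="error">Delete</Button>}>
+         Are you sure?
+       </Dialog> */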
+ ); +}; + +export default Dialog; diff --git a/GUI/src/components/Drawer/Drawer.scss b/GUI/src/components/Drawer/Drawer.scss new file mode 100644 index 00000000..df7bc711 --- /dev/null +++ b/GUI/src/components/Drawer/Drawer.scss @@ -0,0 +1,40 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.drawer { + position: fixed; + display: flex; + flex-direction: column; + top: 100px; + right: 0; + bottom: 0; + background-color: get-color(white); + box-shadow: 0 4px 10px rgba(0, 0, 0, 0.14); + width: 50%; + transition: transform .25s ease-out; + overflow: hidden; + z-index: 98; + + &__header { + display: flex; + align-items: center; + gap: get-spacing(haapsalu); + padding: get-spacing(haapsalu); + border-bottom: 1px solid get-color(black-coral-2); + + .icon { + font-size: 20px; + } + } + + &__title, + &__body { + flex: 1; + } + + &__body { + overflow: auto; + } +} diff --git a/GUI/src/components/Drawer/index.tsx b/GUI/src/components/Drawer/index.tsx new file mode 100644 index 00000000..9b6f771f --- /dev/null +++ b/GUI/src/components/Drawer/index.tsx @@ -0,0 +1,42 @@ +import { CSSProperties, FC, PropsWithChildren, useEffect, useRef } from 'react'; +import { MdOutlineClose } from 'react-icons/md'; +import autoAnimate from '@formkit/auto-animate'; + +import { Icon } from 'components'; +import './Drawer.scss'; + +type DrawerProps = { + title: string; + onClose: () => void; + style?: CSSProperties; +} + +const Drawer: FC> = ({ title, onClose, children, style }) => { + const ref = useRef(null); + + useEffect(() => { + ref.current && autoAnimate(ref.current); + const handleKeyup = (e: KeyboardEvent) => { + if (e.key === 'Escape') onClose(); + }; + document.addEventListener('keyup', handleKeyup); + + return () => document.removeEventListener('keyup', handleKeyup); + }, [onClose]); + + return ( +
+    <div className="drawer" style={style}>
+      {/* markup reconstructed from Drawer.scss; heading level assumed */}
+      <div className="drawer__header">
+        <h2 className="h3 drawer__title">{title}</h2>
+        <button onClick={onClose}>
+          <Icon icon={<MdOutlineClose />} size="medium" />
+        </button>
+      </div>
+      <div className="drawer__body" ref={ref}>
+        {children}
+      </div>
+    </div>
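+    /* Usage sketch (drawerOpen and SettingsForm are placeholders):
+
+         {drawerOpen && (
+           <Drawer title="Settings" onClose={() => setDrawerOpen(false)}>
+             <SettingsForm />
+           </Drawer>
+         )}
+
+       Escape closes the drawer via the document keyup listener above. */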
+ ); +}; + +export default Drawer; diff --git a/GUI/src/components/FileUpload/index.tsx b/GUI/src/components/FileUpload/index.tsx new file mode 100644 index 00000000..5750fba6 --- /dev/null +++ b/GUI/src/components/FileUpload/index.tsx @@ -0,0 +1,98 @@ +import { FormInput } from 'components/FormElements'; +import React, { + ChangeEvent, + forwardRef, + useImperativeHandle, + Ref, + useRef, + useState, +} from 'react'; +import { useTranslation } from 'react-i18next'; + +type FileUploadProps = { + onFileSelect: (file: File | undefined) => void; + accept?: string | string[]; + disabled?: boolean; +}; + +export type FileUploadHandle = { + clearFile: () => void; +}; + +const FileUpload = forwardRef( + (props: FileUploadProps, ref: Ref) => { + const { onFileSelect, accept, disabled } = props; + const fileInputRef = useRef(null); + const [errorMessage, setErrorMessage] = useState(''); + const { t } = useTranslation(); + useImperativeHandle(ref, () => ({ + clearFile() { + onFileSelect(undefined); + if (fileInputRef.current) { + fileInputRef.current.value = ''; + } + }, + })); + + const handleFileChange = (e: ChangeEvent) => { + const file = e.target.files ? e.target.files[0] : undefined; + const maxFileSize = 20 * 1024 * 1024; // 20 MB in bytes + + if (file) { + if (file.size > maxFileSize) { + setErrorMessage(t('global.maxFileSize') ?? ''); + onFileSelect(undefined); + if (fileInputRef.current) { + fileInputRef.current.value = ''; + } + } else { + setErrorMessage(''); + onFileSelect(file); + } + } else { + setErrorMessage(''); + onFileSelect(undefined); + } + }; + + const restrictFormat = (accept: string | string[]) => { + if (typeof accept === 'string') { + if (accept === 'json') return '.json'; + else if (accept === 'xlsx') return '.xlsx'; + else if (accept === 'yaml') return '.yaml, .yml'; + return ''; + } else { + return accept.map((ext) => `.${ext}`).join(', '); + } + }; + + return ( +
+        <div>
+          {/* markup reconstructed; the exact FormInput props are assumptions */}
+          <FormInput
+            ref={fileInputRef}
+            name="file"
+            label=""
+            hideLabel
+            type="file"
+            accept={accept ? restrictFormat(accept) : undefined}
+            disabled={disabled}
+            onChange={handleFileChange}
+          />
+          {errorMessage && (
+            <p className="input__inline_error">{errorMessage}</p>
+          )}
+        </div>
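+        /* Usage sketch: the imperative handle lets a parent reset the field
+           (uploadRef and setFile are illustrative names):
+
+             const uploadRef = useRef<FileUploadHandle>(null);
+             <FileUpload ref={uploadRef} accept="json" onFileSelect={setFile} />
+             uploadRef.current?.clearFile();
+
+           Files over 20 MB are rejected with the global.maxFileSize message. */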
+ ); + } +); + +export default FileUpload; diff --git a/GUI/src/components/FormElements/DynamicForm/index.tsx b/GUI/src/components/FormElements/DynamicForm/index.tsx new file mode 100644 index 00000000..e7a0129f --- /dev/null +++ b/GUI/src/components/FormElements/DynamicForm/index.tsx @@ -0,0 +1,86 @@ +import React, { useEffect, useState } from 'react'; +import { useForm } from 'react-hook-form'; +import FormInput from '../FormInput'; +import Button from 'components/Button'; +import Track from 'components/Track'; +import { useTranslation } from 'react-i18next'; + +type DynamicFormProps = { + formData: { [key: string]: string | number }; + onSubmit: (data: any) => void; + setPatchUpdateModalOpen: React.Dispatch>; +}; + +const DynamicForm: React.FC = ({ + formData, + onSubmit, + setPatchUpdateModalOpen, +}) => { + const { register, handleSubmit, getValues, watch } = useForm({ + defaultValues: formData, + }); + + const [isChanged, setIsChanged] = useState(false); + + const allValues = watch(); + const { t } = useTranslation(); + + const checkIfChanged = () => { + const currentValues = getValues(); + const isDifferent = Object.keys(formData).some( + (key) => currentValues[key] !== formData[key] + ); + setIsChanged(isDifferent); + }; + + useEffect(() => { + checkIfChanged(); + }, [allValues]); + + const renderInput = (key: string) => { + const isRowID = key.toLowerCase() === 'rowid'; + const inputType = isRowID ? 'number' : 'text'; + + return ( +
+      /* assumed: each entry renders as a FormInput registered with
+         react-hook-form, with rowID fields numeric and locked */
+      <FormInput
+        {...register(key)}
+        label={key}
+        type={inputType}
+        disabled={isRowID}
+      />
+ ); + }; + + const handleFormSubmit = (data: any) => { + onSubmit(data); + }; + + return ( +
+    <form onSubmit={handleSubmit(handleFormSubmit)}>
+      {Object.keys(formData).map((key) => (
+        <div key={key}>
+          {renderInput(key)}
+        </div>
+      ))}
+
+      <Track gap={16}>
+        {/* i18n keys on the buttons are assumed */}
+        <Button
+          appearance="secondary"
+          onClick={() => setPatchUpdateModalOpen(false)}
+        >
+          {t('global.cancel')}
+        </Button>
+        <Button type="submit" disabled={!isChanged}>
+          {t('global.save')}
+        </Button>
+      </Track>
+    </form>
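+    /* Usage sketch: formData seeds the default values, and submit only
+       enables once a field differs from the seed (selectedRow/patchRow are
+       illustrative):
+
+         <DynamicForm formData={selectedRow} onSubmit={patchRow}
+           setPatchUpdateModalOpen={setModalOpen} /> */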
+ ); +}; + +export default DynamicForm; diff --git a/GUI/src/components/FormElements/FormCheckbox/FormCheckbox.scss b/GUI/src/components/FormElements/FormCheckbox/FormCheckbox.scss new file mode 100644 index 00000000..8bdf863d --- /dev/null +++ b/GUI/src/components/FormElements/FormCheckbox/FormCheckbox.scss @@ -0,0 +1,57 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.checkbox { + width: 100%; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + + &__label { + display: block; + flex: 0 0 85px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + &__item { + input[type=checkbox] { + display: none; + + + label { + display: block; + padding-left: 32px; + position: relative; + font-size: $veera-font-size-100; + line-height: $veera-line-height-500; + + &::before { + content: ''; + display: block; + width: 16px; + height: 16px; + box-shadow: inset 0 0 0 1px get-color(black-coral-2); + border-radius: 2px; + position: absolute; + left: 4px; + top: 4px; + } + } + + &:checked { + + label { + &::before { + background-image: url('data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTQiIGhlaWdodD0iMTEiIHZpZXdCb3g9IjAgMCAxNCAxMSIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPHBhdGggZD0iTTQuNzQ5NzkgOC4xMjkwNkwxLjYyMjI5IDUuMDAxNTZMMC41NjEwMzUgNi4wNjI4MUw0Ljc0OTc5IDEwLjI1MTZMMTMuNzQ5OCAxLjI1MTU2TDEyLjY4ODUgMC4xOTAzMDhMNC43NDk3OSA4LjEyOTA2WiIgZmlsbD0id2hpdGUiLz4KPC9zdmc+Cg=='); + background-color: get-color(sapphire-blue-10); + background-repeat: no-repeat; + background-position: center; + background-size: 13px 10px; + box-shadow: inset 0 0 0 1px get-color(sapphire-blue-10); + } + } + } + } + } +} diff --git a/GUI/src/components/FormElements/FormCheckbox/index.tsx b/GUI/src/components/FormElements/FormCheckbox/index.tsx new file mode 100644 index 00000000..66645255 --- /dev/null +++ b/GUI/src/components/FormElements/FormCheckbox/index.tsx @@ -0,0 +1,39 @@ +import { forwardRef, InputHTMLAttributes, useId } from 'react'; + +import './FormCheckbox.scss'; + +type FormCheckboxType = InputHTMLAttributes & { + label: string; + name: string; + hideLabel?: boolean; + item: { + label: string; + value: string; + checked?: boolean; + }; +} + +const FormCheckbox = forwardRef(( + { + label, + name, + hideLabel, + item, + ...rest + }, + ref, +) => { + const uid = useId(); + + return ( +
+    <div className="checkbox">
+      {label && !hideLabel && <label className="checkbox__label">{label}</label>}
+      <div className="checkbox__item">
+        <input
+          type="checkbox"
+          ref={ref}
+          id={uid}
+          name={name}
+          value={item.value}
+          defaultChecked={item.checked}
+          {...rest}
+        />
+        <label htmlFor={uid}>{item.label}</label>
+      </div>
+    </div>
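+    /* Usage sketch with react-hook-form (field name illustrative):
+
+         <FormCheckbox
+           label="Active"
+           item={{ label: 'Mark dataset active', value: 'active' }}
+           {...register('isActive')}
+         /> */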
+ ); +}); + +export default FormCheckbox; diff --git a/GUI/src/components/FormElements/FormCheckboxes/FormCheckboxes.scss b/GUI/src/components/FormElements/FormCheckboxes/FormCheckboxes.scss new file mode 100644 index 00000000..8312649c --- /dev/null +++ b/GUI/src/components/FormElements/FormCheckboxes/FormCheckboxes.scss @@ -0,0 +1,68 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.checkboxes { + display: flex; + align-items: flex-start; + gap: get-spacing(paldiski); + + &__label { + display: block; + flex: 0 0 185px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + &__wrapper { + display: flex; + flex-direction: column; + gap: 8px; + } + + &__row { + display: flex; + gap: 20px; + } + + &__item { + input[type=checkbox] { + display: none; + + + label { + display: block; + padding-left: 32px; + position: relative; + font-size: $veera-font-size-100; + line-height: $veera-line-height-500; + text-transform: capitalize; + + &::before { + content: ''; + display: block; + width: 16px; + height: 16px; + box-shadow: inset 0 0 0 1px get-color(black-coral-2); + border-radius: 2px; + position: absolute; + left: 4px; + top: 4px; + } + } + + &:checked { + + label { + &::before { + background-image: url('data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTQiIGhlaWdodD0iMTEiIHZpZXdCb3g9IjAgMCAxNCAxMSIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPHBhdGggZD0iTTQuNzQ5NzkgOC4xMjkwNkwxLjYyMjI5IDUuMDAxNTZMMC41NjEwMzUgNi4wNjI4MUw0Ljc0OTc5IDEwLjI1MTZMMTMuNzQ5OCAxLjI1MTU2TDEyLjY4ODUgMC4xOTAzMDhMNC43NDk3OSA4LjEyOTA2WiIgZmlsbD0id2hpdGUiLz4KPC9zdmc+Cg=='); + background-color: get-color(sapphire-blue-10); + background-repeat: no-repeat; + background-position: center; + background-size: 13px 10px; + box-shadow: inset 0 0 0 1px get-color(sapphire-blue-10); + } + } + } + } + } +} diff --git a/GUI/src/components/FormElements/FormCheckboxes/index.tsx b/GUI/src/components/FormElements/FormCheckboxes/index.tsx new file mode 100644 index 00000000..4f94d48b --- /dev/null +++ b/GUI/src/components/FormElements/FormCheckboxes/index.tsx @@ -0,0 +1,77 @@ +import { ChangeEvent, FC, useId, useState, useEffect } from 'react'; + +import './FormCheckboxes.scss'; + +type FormCheckboxesType = { + label: string; + name: string; + hideLabel?: boolean; + onValuesChange?: (values: Record) => void; + items: { + label: string; + value: string; + }[] |undefined; + isStack?: boolean; + error?: string; + selectedValues?: string[]; +}; + +const FormCheckboxes: FC = ({ + label, + name, + hideLabel, + onValuesChange, + items, + isStack = true, + error, + selectedValues = [], +}) => { + const id = useId(); + const [internalSelectedValues, setInternalSelectedValues] = useState(selectedValues); + + useEffect(() => { + setInternalSelectedValues(selectedValues); + }, [selectedValues]); + + const handleValuesChange = (e: ChangeEvent) => { + const { checked, value } = e.target; + + const newValues = checked + ? [...internalSelectedValues, value] + : internalSelectedValues.filter((v: string) => v !== value); + + setInternalSelectedValues(newValues); + + if (onValuesChange) onValuesChange({ [name]: newValues }); + }; + + return ( +
+    <div className="checkboxes">
+      {label && !hideLabel && (
+        <label className="checkboxes__label">{label}</label>
+      )}
+      {/* grouping reconstructed: items flow in rows unless isStack is set */}
+      <div className="checkboxes__wrapper">
+        <div className={isStack ? 'checkboxes__wrapper' : 'checkboxes__row'}>
+          {items?.map((item, index) => (
+            <div key={`${id}-${index}`} className="checkboxes__item">
+              <input
+                type="checkbox"
+                id={`${id}-${item.value}`}
+                name={name}
+                value={item.value}
+                checked={internalSelectedValues.includes(item.value)}
+                onChange={handleValuesChange}
+              />
+              <label htmlFor={`${id}-${item.value}`}>{item.label}</label>
+            </div>
+          ))}
+        </div>
+      </div>
+      {error && (
+        <p className="input__inline_error">{error}</p>
+      )}
+    </div>
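+    /* Usage sketch: selection state arrives keyed by the field name:
+
+         <FormCheckboxes name="platforms" label="Platforms"
+           items={[{ label: 'Web', value: 'web' }, { label: 'Phone', value: 'phone' }]}
+           onValuesChange={({ platforms }) => setPlatforms(platforms)} /> */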
+ ); +}; + +export default FormCheckboxes; diff --git a/GUI/src/components/FormElements/FormDatepicker/FormDatepicker.scss b/GUI/src/components/FormElements/FormDatepicker/FormDatepicker.scss new file mode 100644 index 00000000..55ac0785 --- /dev/null +++ b/GUI/src/components/FormElements/FormDatepicker/FormDatepicker.scss @@ -0,0 +1,154 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.datepicker { + $self: &; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + width: 100%; + + &__label { + flex: 0 0 185px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + &__wrapper_column { + display: flex; + flex-direction: column; + gap: 7px; + position: relative; + width: 125px; + + .icon { + position: absolute; + right: 8px; + top: 8px; + pointer-events: none; + } + } + + &__wrapper_row { + display: flex; + flex-direction: row; + gap: 7px; + position: relative; + width: 125px; + + .icon { + position: absolute; + right: 8px; + top: 8px; + pointer-events: none; + } + } + + &__error { + width: 100%; + margin-right: 6px; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + color: get-color(black-coral-20); + border-radius: $veera-radius-s; + background-color: get-color(jasper-3); + font-size: 13px; + line-height: 20px; + box-shadow: 0 1px 5px rgba(0, 0, 0, 0.2); + + &::before { + content: ''; + display: block; + background-color: get-color(jasper-3); + border-left: 16px solid transparent; + border-right: 16px solid transparent; + border-bottom: 25px; + } + } + + input { + width: 100%; + display: block; + appearance: none; + background-color: get-color(white); + border: 1px solid get-color(black-coral-6); + border-radius: $veera-radius-s; + color: var(--color-black); + font-size: $veera-font-size-100; + height: 40px; + line-height: 24px; + padding: get-spacing(paldiski); + + &::placeholder { + color: get-color(black-coral-6); + } + + &:focus { + outline: none; + border-color: get-color(sapphire-blue-10); + } + } + + &--error { + input { + border-color: get-color(jasper-10); + } + } + + &--disabled & { + input { + background-color: get-color(black-coral-0); + } + } +} + +.react-datepicker { + font-family: inherit; + font-size: 14px; + border: 1px solid get-color(black-coral-6); + border-radius: 4px; + + &-popper[data-placement^=bottom] { + padding: 0; + } + + &-wrapper { + display: block; + } + + &__input-container { + display: block; + } + + &__triangle { + &::before, + &::after { + content: none !important; + } + } + + &__navigation { + width: 50px; + height: 50px; + top: 0; + + &:hover { + background-color: var(--color-bg); + } + + &--previous { + border-top-left-radius: 4px; + border-right: 1px solid var(--color-gray); + left: 0; + } + + &--next { + border-top-right-radius: 4px; + border-left: 1px solid var(--color-gray); + right: 0; + } + } +} diff --git a/GUI/src/components/FormElements/FormDatepicker/index.tsx b/GUI/src/components/FormElements/FormDatepicker/index.tsx new file mode 100644 index 00000000..1de8e635 --- /dev/null +++ b/GUI/src/components/FormElements/FormDatepicker/index.tsx @@ -0,0 +1,98 @@ +import { forwardRef, useId } from 'react'; +import ReactDatePicker, { registerLocale } from 'react-datepicker'; +import clsx from 'clsx'; +import { et } from 'date-fns/locale'; +import { ControllerRenderProps } from 'react-hook-form'; +import { + MdChevronRight, + MdChevronLeft, + MdOutlineToday, + MdOutlineSchedule, 
+} from 'react-icons/md'; + +import { Icon } from 'components'; +import 'react-datepicker/dist/react-datepicker.css'; +import './FormDatepicker.scss'; + +registerLocale('et-EE', et); + +type FormDatepickerProps = ControllerRenderProps & { + label: string; + name: string; + hideLabel?: boolean; + disabled?: boolean; + placeholder?: string; + timePicker?: boolean; + direction?: 'row' | 'column'; +}; + +const FormDatepicker = forwardRef( + ( + { + label, + name, + hideLabel, + disabled, + placeholder, + timePicker, + direction = 'column', + ...rest + }, + ref + ) => { + const id = useId(); + const { value, onChange } = rest; + + const datepickerClasses = clsx( + 'datepicker', + disabled && 'datepicker--disabled' + ); + + return ( +
+      <div className={datepickerClasses}>
+        {label && !hideLabel && (
+          <label htmlFor={id} className="datepicker__label">{label}</label>
+        )}
+        <div className={direction === 'row' ? 'datepicker__wrapper_row' : 'datepicker__wrapper_column'}>
+          {/* the props before previousMonthButtonLabel (id, selected, dateFormat,
+              locale, placeholderText, disabled) are reconstructed assumptions */}
+          <ReactDatePicker
+            id={id}
+            selected={value}
+            dateFormat={timePicker ? 'HH:mm:ss' : 'dd.MM.yyyy'}
+            locale="et-EE"
+            placeholderText={placeholder}
+            disabled={disabled}
+            previousMonthButtonLabel={<MdChevronLeft />}
+            nextMonthButtonLabel={<MdChevronRight />}
+            aria-label={hideLabel ? label : undefined}
+            showTimeSelect={timePicker}
+            showTimeSelectOnly={timePicker}
+            timeIntervals={15}
+            timeFormat="HH:mm:ss"
+            timeInputLabel=""
+            portalId="overlay-root"
+            {...rest}
+            onChange={onChange}
+          />
+          <Icon
+            icon={
+              timePicker ? (
+                <MdOutlineSchedule />
+              ) : (
+                <MdOutlineToday />
+              )
+            }
+            size="medium"
+          />
+        </div>
+      </div>
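+      /* Usage sketch with react-hook-form's Controller (field name illustrative):
+
+           <Controller name="validFrom" control={control} render={({ field }) => (
+             <FormDatepicker {...field} label="Valid from" />
+           )} /> */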
+ ); + } +); + +export default FormDatepicker; diff --git a/GUI/src/components/FormElements/FormInput/FormInput.scss b/GUI/src/components/FormElements/FormInput/FormInput.scss new file mode 100644 index 00000000..c010c478 --- /dev/null +++ b/GUI/src/components/FormElements/FormInput/FormInput.scss @@ -0,0 +1,97 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.input { + $self: &; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + width: 100%; + + &__label { + flex: 0 0 185px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + &__wrapper { + flex: 1; + display: flex; + flex-direction: column; + gap: 0px; + position: relative; + + .icon { + position: absolute; + top: 10px; + right: 10px; + } + } + + &__inline_error { + color: get-color(jasper-10); + font-size: 12px; + + } + + &__error { + width: 100%; + margin-right: 6px; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + color: get-color(black-coral-20); + border-radius: $veera-radius-s; + background-color: get-color(jasper-3); + font-size: 13px; + line-height: 20px; + box-shadow: 0 1px 5px rgba(0, 0, 0, 0.2); + + &::before { + content: ''; + display: block; + background-color: get-color(jasper-3); + border-left: 16px solid transparent; + border-right: 16px solid transparent; + border-bottom: 25px; + } + } + + input { + width: 100%; + display: block; + appearance: none; + background-color: get-color(white); + border: 1px solid get-color(black-coral-6); + border-radius: $veera-radius-s; + color: var(--color-black); + font-size: $veera-font-size-100; + height: 40px; + line-height: 24px; + padding: get-spacing(paldiski); + + &::placeholder { + color: get-color(black-coral-6); + } + + &:focus { + outline: none; + border-color: get-color(sapphire-blue-10); + } + } + + &--error { + input { + border-color: get-color(jasper-10); + } + } + + &--disabled & { + input { + background-color: get-color(black-coral-0); + border: solid 1px get-color(jasper-10); + } + } +} diff --git a/GUI/src/components/FormElements/FormInput/index.tsx b/GUI/src/components/FormElements/FormInput/index.tsx new file mode 100644 index 00000000..dd8df673 --- /dev/null +++ b/GUI/src/components/FormElements/FormInput/index.tsx @@ -0,0 +1,50 @@ +import { forwardRef, InputHTMLAttributes, PropsWithChildren, useId } from 'react'; +import clsx from 'clsx'; +import './FormInput.scss'; +import { DefaultTFuncReturn } from 'i18next'; + +type InputProps = PropsWithChildren> & { + label: string; + name: string; + hideLabel?: boolean; + maxLength?: number; + error?: string; + placeholder?:string | DefaultTFuncReturn; +}; + +const FormInput = forwardRef( + ( + { label, name, disabled, hideLabel, maxLength, error, children,placeholder, ...rest }, + ref + ) => { + const id = useId(); + + const inputClasses = clsx('input', disabled && 'input--disabled', error && 'input--error'); + + return ( +
+      <div className={inputClasses}>
+        {label && !hideLabel && (
+          <label htmlFor={id} className="input__label">{label}</label>
+        )}
+        <div className="input__wrapper">
+          {/* input attributes reconstructed from the declared props */}
+          <input
+            id={id}
+            name={name}
+            maxLength={maxLength}
+            disabled={disabled}
+            ref={ref}
+            placeholder={placeholder ?? undefined}
+            aria-label={hideLabel ? label : undefined}
+            {...rest}
+          />
+          {error && (
+            <p className="input__inline_error">{error}</p>
+          )}
+          {children}
+        </div>
+      </div>
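+      /* Usage sketch (field name illustrative):
+
+           <FormInput label="Dataset name" {...register('name')}
+             error={errors.name?.message} /> */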
+ ); + } +); + +export default FormInput; diff --git a/GUI/src/components/FormElements/FormRadios/FormRadios.scss b/GUI/src/components/FormElements/FormRadios/FormRadios.scss new file mode 100644 index 00000000..d0db7fbd --- /dev/null +++ b/GUI/src/components/FormElements/FormRadios/FormRadios.scss @@ -0,0 +1,76 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.radios { + width: 100%; + display: flex; + align-items: flex-start; + gap: get-spacing(paldiski); + + &__label { + display: block; + flex: 0 0 185px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + &__wrapper { + display: flex; + gap: 8px; + } + + &__stack { + gap: 8px; + } + + &__item { + input[type=radio] { + display: none; + + + label { + display: block; + padding-left: 32px; + position: relative; + font-size: $veera-font-size-100; + line-height: $veera-line-height-500; + text-transform: capitalize; + + &::before { + content: ''; + display: block; + width: 16px; + height: 16px; + box-shadow: inset 0 0 0 1px get-color(black-coral-2); + border-radius: 50%; + position: absolute; + left: 4px; + top: 4px; + } + } + + &:checked { + + label { + &::before { + width: 20px; + height: 20px; + box-shadow: inset 0 0 0 1px #8F91A8; + } + + &::after { + content: ''; + display: block; + width: 10px; + height: 10px; + border-radius: 50%; + background-color: get-color(sapphire-blue-10); + position: absolute; + top: 9px; + left: 9px; + } + } + } + } + } +} diff --git a/GUI/src/components/FormElements/FormRadios/index.tsx b/GUI/src/components/FormElements/FormRadios/index.tsx new file mode 100644 index 00000000..9c276d4a --- /dev/null +++ b/GUI/src/components/FormElements/FormRadios/index.tsx @@ -0,0 +1,65 @@ +import { FC, useId } from 'react'; +import './FormRadios.scss'; + +type FormRadiosType = { + label: string; + name: string; + hideLabel?: boolean; + items: { + label: string; + value: string; + }[] |undefined; + onChange: (selectedValue: string) => void; + selectedValue?: string; + isStack?: boolean; + error?: string; +}; + +const FormRadios: FC = ({ + label, + name, + hideLabel, + items, + onChange, + selectedValue, + isStack = false, + error, +}) => { + const id = useId(); + + return ( +
+    <div className="radios">
+      {label && !hideLabel && (
+        <label className="radios__label">{label}</label>
+      )}
+      {/* wrapper class choice assumed from the isStack prop and FormRadios.scss */}
+      <div className={isStack ? 'radios__stack' : 'radios__wrapper'}>
+        {items?.map((item, index) => (
+          <div key={`${id}-${index}`} className="radios__item">
+            <input
+              type="radio"
+              id={`${id}-${item.value}`}
+              name={name}
+              value={item.value}
+              checked={selectedValue === item.value}
+              onChange={(event) => {
+                onChange(event.target.value);
+              }}
+            />
+            <label htmlFor={`${id}-${item.value}`}>{item.label}</label>
+          </div>
+        ))}
+      </div>
+      {error && (
+        <p className="input__inline_error">{error}</p>
+      )}
+    </div>
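+    /* Usage sketch: FormRadios is controlled via selectedValue/onChange:
+
+         <FormRadios name="visibility" label="Visibility"
+           items={[{ label: 'Public', value: 'public' }, { label: 'Private', value: 'private' }]}
+           selectedValue={visibility} onChange={setVisibility} /> */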
+ ); +}; + +export default FormRadios; + + + + diff --git a/GUI/src/components/FormElements/FormSelect/FormMultiselect.tsx b/GUI/src/components/FormElements/FormSelect/FormMultiselect.tsx new file mode 100644 index 00000000..ef9480ae --- /dev/null +++ b/GUI/src/components/FormElements/FormSelect/FormMultiselect.tsx @@ -0,0 +1,124 @@ +import { FC, ReactNode, SelectHTMLAttributes, useId, useState } from 'react'; +import { useSelect } from 'downshift'; +import clsx from 'clsx'; +import { useTranslation } from 'react-i18next'; +import { MdArrowDropDown } from 'react-icons/md'; + +import { Icon } from 'components'; +import './FormSelect.scss'; + +type SelectOption = { label: string, value: string }; + +type FormMultiselectProps = SelectHTMLAttributes & { + label: ReactNode; + name: string; + placeholder?: string; + hideLabel?: boolean; + options: SelectOption[]; + selectedOptions?: SelectOption[]; + onSelectionChange?: (selection: SelectOption[] | null) => void; +}; + +const FormMultiselect: FC = ( + { + label, + hideLabel, + options, + disabled, + placeholder, + defaultValue, + selectedOptions, + onSelectionChange, + ...rest + }, +) => { + const id = useId(); + const { t } = useTranslation(); + const [selectedItems, setSelectedItems] = useState(selectedOptions ?? []); + const { + isOpen, + getToggleButtonProps, + getLabelProps, + getMenuProps, + highlightedIndex, + getItemProps, + } = useSelect({ + items: options, + stateReducer: (state, actionAndChanges) => { + const { changes, type } = actionAndChanges; + if (type === useSelect.stateChangeTypes.ItemClick) { + return { + ...changes, + isOpen: true, + highlightedIndex: state.highlightedIndex, + }; + } else { + return changes; + } + }, + selectedItem: null, + onSelectedItemChange: ({ selectedItem }) => { + if (!selectedItem) { + return; + } + const index = selectedItems.findIndex((item) => item.value === selectedItem.value); + const items = []; + if (index > 0) { + items.push( + ...selectedItems.slice(0, index), + ...selectedItems.slice(index + 1) + ); + } else if (index === 0) { + items.push(...selectedItems.slice(1)); + } else { + items.push(...selectedItems, selectedItem); + } + setSelectedItems(items); + if (onSelectionChange) onSelectionChange(items); + }, + }); + + const selectClasses = clsx( + 'select', + disabled && 'select--disabled', + ); + + const placeholderValue = placeholder || t('global.choose'); + + return ( +
+    <div className={selectClasses}>
+      {label && !hideLabel && <label {...getLabelProps()}>{label}</label>}
+      <div className="select__wrapper">
+        {/* trigger/menu markup reconstructed from FormSelect.scss */}
+        <div className="select__trigger select__default" {...getToggleButtonProps()}>
+          {selectedItems?.length > 0 ? `${t('global.chosen')} (${selectedItems?.length})` : placeholderValue}
+          <Icon icon={<MdArrowDropDown />} size="medium" />
+        </div>
+        <ul className="select__menu" {...getMenuProps()}>
+          {isOpen &&
+            options.map((item, index) => (
+              <li
+                key={`${item.value}-${index}`}
+                className="select__option"
+                aria-selected={highlightedIndex === index}
+                {...getItemProps({ item, index })}
+              >
+                <input
+                  type="checkbox"
+                  checked={selectedItems.map((s) => s.value).includes(item.value)}
+                  value={item.value}
+                  onChange={() => null}
+                />
+                {item.label}
+              </li>
+            ))}
+        </ul>
+      </div>
+    </div>
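+    /* Usage sketch: selection is additive, and clicking a chosen option
+       removes it again (names illustrative):
+
+         <FormMultiselect name="dgs" label="Dataset groups"
+           options={groups.map((g) => ({ label: g.name, value: String(g.id) }))}
+           onSelectionChange={(sel) => setSelected(sel ?? [])} /> */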
+ ); +}; + + +export default FormMultiselect; diff --git a/GUI/src/components/FormElements/FormSelect/FormSelect.scss b/GUI/src/components/FormElements/FormSelect/FormSelect.scss new file mode 100644 index 00000000..b6b4f434 --- /dev/null +++ b/GUI/src/components/FormElements/FormSelect/FormSelect.scss @@ -0,0 +1,128 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.select { + $self: &; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + width: 100%; + + + &__label { + flex: 0 0 185px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + &__wrapper { + width: 100%; + position: relative; + } + + &__error { + border: 1px solid get-color(jasper-10); + + } + + &__default { + border: 1px solid get-color(black-coral-6); + + } + + &__trigger { + width: 100%; + display: flex; + align-items: center; + justify-content: space-between; + appearance: none; + background-color: get-color(white); + border-radius: $veera-radius-s; + color: get-color(black); + font-size: $veera-font-size-100; + height: 40px; + line-height: 24px; + padding: get-spacing(paldiski); + + .icon { + font-size: $veera-font-size-250; + } + + &[aria-expanded=true] { + border-color: get-color(sapphire-blue-10); + border-radius: 3px; + + + #{$self}__menu { + display: block; + } + + +#{$self}__menu_up { + display: block; + } + + .icon { + transform: rotate(180deg); + } + } + } + + &__menu { + display: none; + position: absolute; + top: 100%; + left: 0; + right: 0; + background-color: get-color(white); + border-radius: 4px; + border: 1px solid get-color(black-coral-2); + border-top: 1; + z-index: 9998; + max-height: 320px; + overflow: auto; + margin-top: 3px; + } + + &__menu_up { + display: none; + position: absolute; + top: auto; + left: 0; + right: 0; + bottom: 100%; + background-color: get-color(white); + border-radius: 4px; + border: 1px solid get-color(black-coral-2); + border-top: 1; + z-index: 9998; + max-height: 320px; + overflow: auto; + margin-bottom: 3px; + } + + &__option { + display: flex; + align-items: center; + gap: 8px; + padding: 8px 15px; + + span { + display: block; + } + + &[aria-selected=true] { + background-color: #DDEBFF; + + &:hover, + &:focus { + background-color: get-color(sapphire-blue-10); + } + } + + &:hover, + &:focus { + background-color: get-color(black-coral-0); + } + } +} diff --git a/GUI/src/components/FormElements/FormSelect/index.tsx b/GUI/src/components/FormElements/FormSelect/index.tsx new file mode 100644 index 00000000..37bb89cd --- /dev/null +++ b/GUI/src/components/FormElements/FormSelect/index.tsx @@ -0,0 +1,148 @@ +import { + forwardRef, + ReactNode, + SelectHTMLAttributes, + useId, + useState, + useEffect, +} from 'react'; +import { useSelect } from 'downshift'; +import clsx from 'clsx'; +import { useTranslation } from 'react-i18next'; +import { MdArrowDropDown } from 'react-icons/md'; + +import { Icon } from 'components'; +import './FormSelect.scss'; +import { ControllerRenderProps } from 'react-hook-form'; + +type FormSelectOption = { + label: string; + value: string | { name: string; id: string }; +}; + +type FormSelectProps = Partial & + SelectHTMLAttributes & { + label: ReactNode; + name: string; + placeholder?: string; + hideLabel?: boolean; + direction?: 'down' | 'up'; + options: FormSelectOption[]; + onSelectionChange?: (selection: FormSelectOption | null) => void; + error?: string; + defaultValue?: string | { name: string; 
id: string } | number; + }; + +const itemToString = (item: FormSelectOption | null) => { + return item ? item.value.toString() : ''; +}; + +const FormSelect = forwardRef( + ( + { + label, + hideLabel, + direction = 'down', + options, + disabled, + placeholder, + defaultValue, + onSelectionChange, + error, + ...rest + }, + ref + ) => { + const id = useId(); + const { t } = useTranslation(); + + const [selectedItem, setSelectedItem] = useState( + options?.find((o) => o.value === defaultValue) || + options?.find( + (o) => typeof o.value === 'object' && o.value?.name === defaultValue + ) || + null + ); + + useEffect(() => { + const newSelectedItem = + options?.find((o) => o.value === defaultValue) || + options?.find( + (o) => typeof o.value === 'object' && o.value?.name === defaultValue + ) || + null; + setSelectedItem(newSelectedItem); + }, [defaultValue, options]); + + const { + isOpen, + getToggleButtonProps, + getLabelProps, + getMenuProps, + highlightedIndex, + getItemProps, + } = useSelect({ + id, + items: options, + itemToString, + selectedItem, + onSelectedItemChange: ({ selectedItem: newSelectedItem }) => { + setSelectedItem(newSelectedItem ?? null); + if (onSelectionChange) onSelectionChange(newSelectedItem ?? null); + }, + }); + + const selectClasses = clsx('select', disabled && 'select--disabled'); + + const placeholderValue = + placeholder || t('datasetGroups.createDataset.selectPlaceholder'); + + return ( +
+      <div className={selectClasses} style={{ position: 'relative' }}>
+        {label && !hideLabel && (
+          <label className="select__label" {...getLabelProps()}>{label}</label>
+        )}
+        <div className="select__wrapper">
+          <button
+            type="button"
+            className={clsx('select__trigger', error ? 'select__error' : 'select__default')}
+            {...getToggleButtonProps()}
+          >
+            {selectedItem?.label ?? placeholderValue}
+            <Icon label={null} icon={<MdArrowDropDown />} />
+          </button>
+          <ul className={direction === 'up' ? 'select__menu_up' : 'select__menu'} {...getMenuProps()}>
+            {isOpen &&
+              options.map((item, index) => (
+                <li
+                  key={`${index}-${item.label}`}
+                  className="select__option"
+                  {...getItemProps({ item, index })}
+                >
+                  {item.label}
+                </li>
+              ))}
+          </ul>
+          {error && <p>{error}</p>}
+        </div>
+      </div>
+ ); + } +); + +export default FormSelect; diff --git a/GUI/src/components/FormElements/FormTextarea/FormTextarea.scss b/GUI/src/components/FormElements/FormTextarea/FormTextarea.scss new file mode 100644 index 00000000..ff1971ac --- /dev/null +++ b/GUI/src/components/FormElements/FormTextarea/FormTextarea.scss @@ -0,0 +1,109 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.textarea { + $self: &; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + width: 100%; + + &__label { + flex: 0 0 185px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + &__wrapper { + flex: 1; + display: flex; + flex-direction: column; + gap: 7px; + position: relative; + } + + &__error { + width: 100%; + margin-right: 6px; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + color: get-color(black-coral-20); + border-radius: $veera-radius-s; + background-color: get-color(jasper-3); + font-size: 13px; + line-height: 20px; + box-shadow: 0 1px 5px rgba(0, 0, 0, 0.2); + + &::before { + content: ''; + display: block; + background-color: get-color(jasper-3); + border-left: 16px solid transparent; + border-right: 16px solid transparent; + border-bottom: 25px; + } + } + + &__max-length-top { + position: absolute; + top: 10px; + right: 8px; + font-size: $veera-font-size-80; + color: get-color(black-coral-12); + pointer-events: none; + } + + &__max-length-bottom { + position: absolute; + bottom: 10px; + right: 8px; + font-size: $veera-font-size-80; + color: get-color(black-coral-12); + pointer-events: none; + } + + textarea { + width: 100%; + display: block; + appearance: none; + background-color: get-color(white); + border: 1px solid get-color(black-coral-6); + border-radius: $veera-radius-s; + color: var(--color-black); + font-size: $veera-font-size-80; + line-height: $veera-line-height-500; + height: 40px; + min-height: 40px; + padding: get-spacing(paldiski); + + &::placeholder { + color: get-color(black-coral-6); + } + + &:focus { + outline: none; + border-color: get-color(sapphire-blue-10); + } + } + + &--error { + input { + border-color: get-color(jasper-10); + } + } + + &--disabled & { + input { + background-color: get-color(black-coral-0); + } + } + + &--maxlength-shown { + textarea { + padding-right: 70px; + } + } +} diff --git a/GUI/src/components/FormElements/FormTextarea/index.tsx b/GUI/src/components/FormElements/FormTextarea/index.tsx new file mode 100644 index 00000000..4190c782 --- /dev/null +++ b/GUI/src/components/FormElements/FormTextarea/index.tsx @@ -0,0 +1,72 @@ +import { ChangeEvent, forwardRef, useId, useState } from 'react'; +import TextareaAutosize, { TextareaAutosizeProps } from 'react-textarea-autosize'; +import clsx from 'clsx'; + +import './FormTextarea.scss'; + +type TextareaProps = TextareaAutosizeProps & { + label: string; + name: string; + hideLabel?: boolean; + showMaxLength?: boolean; + maxLengthBottom?: boolean; +}; + +const FormTextarea = forwardRef(( + { + label, + name, + maxLength = 2000, + minRows = 3, + maxRows = 3, + disabled, + hideLabel, + showMaxLength, + maxLengthBottom, + defaultValue, + onChange, + ...rest + }, + ref, +) => { + const id = useId(); + const [currentLength, setCurrentLength] = useState((typeof defaultValue === 'string' && defaultValue.length) || 0); + const textareaClasses = clsx( + 'textarea', + disabled && 'textarea--disabled', + showMaxLength && 'textarea--maxlength-shown', + ); + 
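+  // Mirror the textarea length into state so the "current/max" counter can render.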
+  const handleOnChange = (e: ChangeEvent<HTMLTextAreaElement>) => {
+    if (showMaxLength) {
+      setCurrentLength(e.target.value.length);
+    }
+  };
+
+  return (
+    <div className={textareaClasses}>
+      {label && !hideLabel && (
+        <label htmlFor={id} className="textarea__label">{label}</label>
+      )}
+      <div className="textarea__wrapper">
+        <TextareaAutosize
+          id={id}
+          name={name}
+          ref={ref}
+          maxLength={maxLength}
+          minRows={minRows}
+          maxRows={maxRows}
+          disabled={disabled}
+          defaultValue={defaultValue}
+          onChange={(e) => {
+            if (onChange) onChange(e);
+            handleOnChange(e);
+          }}
+          {...rest}
+        />
+        {showMaxLength && (
+          <div className={maxLengthBottom ? 'textarea__max-length-bottom' : 'textarea__max-length-top'}>
+            {currentLength}/{maxLength}
+          </div>
+        )}
+      </div>
+    </div>
+ ); +}); + +export default FormTextarea; diff --git a/GUI/src/components/FormElements/Switch/Switch.scss b/GUI/src/components/FormElements/Switch/Switch.scss new file mode 100644 index 00000000..fddf67c0 --- /dev/null +++ b/GUI/src/components/FormElements/Switch/Switch.scss @@ -0,0 +1,68 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.switch { + $self: &; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + + &__label { + flex: 0 0 185px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + &__button { + display: flex; + align-items: center; + gap: 4px; + height: 40px; + isolation: isolate; + padding: 4px; + border-radius: 20px; + background-color: get-color(black-coral-1); + font-size: $veera-font-size-80; + line-height: $veera-line-height-500; + color: get-color(black-coral-12); + position: relative; + transition: background-color .25s ease-out; + + &[aria-checked=true] { + background-color: var(--active-color, get-color(sapphire-blue-10)); + color: get-color(sapphire-blue-10); + + #{$self} { + &__off { + color: get-color(white); + background: none; + } + + &__on { + color: var(--active-color, get-color(sapphire-blue-10)); + background-color: get-color(white); + } + } + } + } + + &__thumb { + display: none; + } + + &__on, + &__off { + display: flex; + border-radius: 20px; + padding: 5.5px 10px; + font-weight: $veera-font-weight-delta; + transition: all .25s ease-out; + } + + &__off { + font-weight: $veera-font-weight-delta; + background-color: get-color(white); + } +} diff --git a/GUI/src/components/FormElements/Switch/index.tsx b/GUI/src/components/FormElements/Switch/index.tsx new file mode 100644 index 00000000..ed414c7e --- /dev/null +++ b/GUI/src/components/FormElements/Switch/index.tsx @@ -0,0 +1,68 @@ +import { forwardRef, useId } from 'react'; +import * as RadixSwitch from '@radix-ui/react-switch'; +import { useTranslation } from 'react-i18next'; +import { ControllerRenderProps } from 'react-hook-form'; + +import './Switch.scss'; + +type SwitchProps = Partial & { + onLabel?: string; + offLabel?: string; + onColor?: string; + name?: string; + label: string; + checked?: boolean; + defaultChecked?: boolean; + hideLabel?: boolean; + onCheckedChange?: (checked: boolean) => void; +}; + +const Switch = forwardRef( + ( + { + onLabel, + offLabel, + onColor, + name, + label, + checked, + hideLabel, + onCheckedChange, + defaultChecked, + }, + ref + ) => { + const id = useId(); + const { t } = useTranslation(); + const onValueLabel = onLabel || t('global.on'); + const offValueLabel = offLabel || t('global.off'); + + return ( +
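+      // Radix switch with textual on/off captions; the [aria-checked] rules in
+      // Switch.scss swap the caption colors when the state changes.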
+      <div className="switch">
+        {label && !hideLabel && (
+          <label htmlFor={id} className="switch__label">{label}</label>
+        )}
+        <RadixSwitch.Root
+          ref={ref}
+          id={id}
+          name={name}
+          checked={checked}
+          defaultChecked={defaultChecked}
+          onCheckedChange={onCheckedChange}
+          className="switch__button"
+        >
+          <RadixSwitch.Thumb className="switch__thumb" />
+          <span className="switch__on">{onValueLabel}</span>
+          <span className="switch__off">{offValueLabel}</span>
+        </RadixSwitch.Root>
+      </div>
+ ); + } +); + +export default Switch; diff --git a/GUI/src/components/FormElements/SwitchBox/SwitchBox.scss b/GUI/src/components/FormElements/SwitchBox/SwitchBox.scss new file mode 100644 index 00000000..2f7a0490 --- /dev/null +++ b/GUI/src/components/FormElements/SwitchBox/SwitchBox.scss @@ -0,0 +1,45 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.switchbox { + $self: &; + width: 100%; + display: flex; + align-items: center; + justify-content: space-between; + + &__button { + width: 48px; + height: 8px; + border-radius: 4px; + background-color: get-color(black-coral-6); + position: relative; + + &[aria-checked=true] { + background-color: get-color(sapphire-blue-4); + + #{$self} { + &__thumb { + transform: translate(24px, -50%); + background-color: get-color(sapphire-blue-10); + } + } + } + } + + &__thumb { + position: absolute; + width: 24px; + height: 24px; + border-radius: 50%; + background-color: get-color(white); + border: 1px solid get-color(black-coral-2); + box-shadow: 0 4px 10px rgba(0, 0, 0, 0.14); + left: 0; + top: 50%; + transform: translateY(-50%); + transition: all .25s ease-out; + } +} diff --git a/GUI/src/components/FormElements/SwitchBox/index.tsx b/GUI/src/components/FormElements/SwitchBox/index.tsx new file mode 100644 index 00000000..1550576a --- /dev/null +++ b/GUI/src/components/FormElements/SwitchBox/index.tsx @@ -0,0 +1,44 @@ +import { forwardRef, useId } from 'react'; +import * as RadixSwitch from '@radix-ui/react-switch'; +import { ControllerRenderProps } from 'react-hook-form'; + +import './SwitchBox.scss'; + +type SwitchBoxProps = Partial & { + name?: string; + label: string; + checked?: boolean; + hideLabel?: boolean; + onCheckedChange?: (checked: boolean) => void; +} + +const SwitchBox = forwardRef(( + { + name, + label, + checked, + hideLabel, + onCheckedChange, + }, + ref, +) => { + const id = useId(); + + return ( +
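+    // Same Radix primitive as Switch, rendered as a sliding dot (see SwitchBox.scss).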
+    <div className="switchbox">
+      {label && !hideLabel && (
+        <label htmlFor={id}>{label}</label>
+      )}
+      <RadixSwitch.Root
+        ref={ref}
+        id={id}
+        name={name}
+        checked={checked}
+        onCheckedChange={onCheckedChange}
+        className="switchbox__button"
+      >
+        <RadixSwitch.Thumb className="switchbox__thumb" />
+      </RadixSwitch.Root>
+    </div>
+ ); +}); + +export default SwitchBox; diff --git a/GUI/src/components/FormElements/index.tsx b/GUI/src/components/FormElements/index.tsx new file mode 100644 index 00000000..ac295d55 --- /dev/null +++ b/GUI/src/components/FormElements/index.tsx @@ -0,0 +1,23 @@ +import FormInput from './FormInput'; +import FormTextarea from './FormTextarea'; +import FormSelect from './FormSelect'; +import FormMultiselect from './FormSelect/FormMultiselect'; +import Switch from './Switch'; +import FormCheckboxes from './FormCheckboxes'; +import FormRadios from './FormRadios'; +import FormCheckbox from './FormCheckbox'; +import FormDatepicker from './FormDatepicker'; +import SwitchBox from './SwitchBox'; + +export { + FormInput, + FormTextarea, + FormSelect, + FormMultiselect, + Switch, + FormCheckboxes, + FormRadios, + FormCheckbox, + FormDatepicker, + SwitchBox, +}; diff --git a/GUI/src/components/Header/Header.scss b/GUI/src/components/Header/Header.scss new file mode 100644 index 00000000..542c06f6 --- /dev/null +++ b/GUI/src/components/Header/Header.scss @@ -0,0 +1,10 @@ +@import '@buerokratt-ria/styles/styles/tools/spacing'; +@import '@buerokratt-ria/styles/styles/tools/color'; + +.header { + height: 100px; + padding: 24px 24px 24px 42px; + box-shadow: 0 0 2px rgba(0, 0, 0, 0.14), 0 2px 2px rgba(0, 0, 0, 0.12), 0 1px 3px rgba(0, 0, 0, 0.2); + background-color: get-color(white); + z-index: 99; +} diff --git a/GUI/src/components/Header/index.tsx b/GUI/src/components/Header/index.tsx new file mode 100644 index 00000000..51e30ec3 --- /dev/null +++ b/GUI/src/components/Header/index.tsx @@ -0,0 +1,196 @@ +/* eslint-disable react-hooks/exhaustive-deps */ +import { FC, useEffect, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useMutation, useQuery } from '@tanstack/react-query'; +import { AxiosError } from 'axios'; + +import { Track, Button, Dialog } from 'components'; +import useStore from 'store'; +import { useToast } from 'hooks/useToast'; +import apiDev from 'services/api-dev'; +import { useCookies } from 'react-cookie'; +import './Header.scss'; +import { useDialog } from 'hooks/useDialog'; +import { ButtonAppearanceTypes } from 'enums/commonEnums'; +import { authEndpoints } from 'utils/endpoints'; +import { authQueryKeys } from 'utils/queryKeys'; +import { UserInfo } from 'types/userInfo'; + +interface HeaderProps { + toastContext: any; + user: UserInfo | null; +} + +const Header: FC = () => { + const { t } = useTranslation(); + const userInfo = useStore((state) => state.userInfo); + const toast = useToast(); + + const { open } = useDialog(); + + const [sessionTimeOutDuration, setSessionTimeOutDuration] = + useState(30); + const [sessionTimeOutModalOpened, setSessionTimeOutModalOpened] = + useState(false); + const [sessionExtentionInProgress, setSessionExtentionInProgress] = + useState(false); + const customJwtCookieKey = 'customJwtCookie'; + + useEffect(() => { + const interval = setInterval(() => { + const expirationTimeStamp = localStorage.getItem('exp'); + if ( + expirationTimeStamp !== 'null' && + expirationTimeStamp !== null && + expirationTimeStamp !== undefined + ) { + const expirationDate = new Date(parseInt(expirationTimeStamp) ?? 
''); + const currentDate = new Date(Date.now()); + if ( + expirationDate.getTime() - currentDate.getTime() <= 240000 + ) { + if (!sessionTimeOutModalOpened) { + setSessionTimeOutModalOpened(true); + setSessionTimeOutDuration(30); + } + } + } + }, 2000); + return () => clearInterval(interval); + }, [open, sessionTimeOutDuration]); + + useEffect(() => { + let timer= null; + if (sessionTimeOutModalOpened) { + timer = setInterval(() => { + setSessionTimeOutDuration((prev) => { + if (prev > 0) { + return prev - 1; + } else { + if (!sessionExtentionInProgress) handleLogout(); + return 0; + } + }); + }, 1000); + } else if (timer) { + clearInterval(timer); + } + + return () => { + if (timer) { + clearInterval(timer); + } + }; + }, [sessionTimeOutModalOpened]); + + const [_, setCookie] = useCookies([customJwtCookieKey]); + + const setNewCookie = (cookieValue: string) => { + const cookieOptions = { path: '/' }; + setCookie(customJwtCookieKey, cookieValue, cookieOptions); + }; + + const extendUserSessionMutation = useMutation({ + mutationFn: async () => { + return await apiDev.get(authEndpoints.GET_EXTENDED_COOKIE()); + }, + onSuccess: (data) => { + setNewCookie(data?.data?.response); + setSessionTimeOutDuration(30); + setSessionTimeOutModalOpened(false); + setSessionExtentionInProgress(false); + refetch() + }, + onError: (error: AxiosError) => { + handleLogout(); + }, + }); + + const { refetch } = useQuery({ + queryKey: authQueryKeys.USER_DETAILS(), + onSuccess: (res: { response: UserInfo }) => { + localStorage.setItem('exp', res.response.JWTExpirationTimestamp); + useStore.getState().setUserInfo(res.response); + }, + enabled: false + }); + const logoutMutation = useMutation({ + mutationFn: () => apiDev.get(authEndpoints.LOGOUT()), + onSuccess() { + localStorage.removeItem('exp'); + window.location.href = import.meta.env.REACT_APP_CUSTOMER_SERVICE_LOGIN; + }, + onError: async (error: AxiosError) => { + toast.open({ + type: 'error', + title: t('global.notificationError'), + message: error.message, + }); + }, + }); + + const handleLogout = () => { + localStorage.removeItem('exp'); + logoutMutation.mutate(); + }; + return ( +
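+    // Logout action plus the session-timeout dialog driven by the intervals above.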
+    <header className="header">
+      <Track justify="between">
+        {userInfo && (
+          <Button appearance={ButtonAppearanceTypes.TEXT} onClick={handleLogout}>
+            {t('global.logout')}
+          </Button>
+        )}
+      </Track>
+      {sessionTimeOutModalOpened && (
+        <Dialog
+          onClose={() => setSessionTimeOutModalOpened(false)}
+          isOpen={sessionTimeOutModalOpened}
+          title={t('global.sessionTimeOutTitle') ?? ''}
+          footer={
+            <Button
+              onClick={() => {
+                setSessionExtentionInProgress(true);
+                extendUserSessionMutation.mutate();
+              }}
+            >
+              {/* translation key assumed */}
+              {t('global.extendSession') ?? ''}
+            </Button>
+          }
+        >
+          <p>
+            {t('global.sessionTimeOutDesc', {
+              seconds: sessionTimeOutDuration,
+            }) ?? ''}
+          </p>
+        </Dialog>
+      )}
+    </header>
+ ); +}; + +export default Header; diff --git a/GUI/src/components/Icon/Icon.scss b/GUI/src/components/Icon/Icon.scss new file mode 100644 index 00000000..ce570acf --- /dev/null +++ b/GUI/src/components/Icon/Icon.scss @@ -0,0 +1,17 @@ +@import 'src/styles/tools/spacing'; + +.icon { + display: inline-flex; + align-items: center; + justify-content: center; + + &--small { + width: get-spacing(haapsalu); + height: get-spacing(haapsalu); + } + + &--medium { + width: get-spacing(kuressaare); + height: get-spacing(kuressaare); + } +} diff --git a/GUI/src/components/Icon/index.tsx b/GUI/src/components/Icon/index.tsx new file mode 100644 index 00000000..d9ab3988 --- /dev/null +++ b/GUI/src/components/Icon/index.tsx @@ -0,0 +1,26 @@ +import { CSSProperties, forwardRef, ReactNode, StyleHTMLAttributes } from 'react'; +import * as AccessibleIcon from '@radix-ui/react-accessible-icon'; +import clsx from 'clsx'; + +import './Icon.scss'; + +type IconProps = StyleHTMLAttributes & { + label?: string | null; + icon: ReactNode; + size?: 'small' | 'medium'; +}; + +const Icon = forwardRef(({ label, icon, size = 'small', ...rest }, ref) => { + const iconClasses = clsx( + 'icon', + `icon--${size}`, + ); + + return ( + + {icon} + + ); +}); + +export default Icon; diff --git a/GUI/src/components/Label/Label.scss b/GUI/src/components/Label/Label.scss new file mode 100644 index 00000000..4c5a6675 --- /dev/null +++ b/GUI/src/components/Label/Label.scss @@ -0,0 +1,91 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.label { + $self: &; + display: flex; + padding: 1.5px 16px; + font-size: 12px; + font-weight: $veera-font-weight-delta; + border: 2px solid; + background-color: get-color(white); + border-radius: $veera-radius-s; + position: relative; + width: fit-content; + height: fit-content; + margin-right: 5px; + text-transform: capitalize; + + &--info { + color: get-color(sapphire-blue-10); + border-color: get-color(sapphire-blue-10); + + #{$self} { + &__icon { + border-color: get-color(sapphire-blue-10); + } + } + } + + &--warning { + color: get-color(dark-tangerine-10); + border-color: get-color(dark-tangerine-10); + + #{$self} { + &__icon { + border-color: get-color(dark-tangerine-10); + } + } + } + + &--error { + color: get-color(jasper-10); + border-color: get-color(jasper-10); + + #{$self} { + &__icon { + border-color: get-color(jasper-10); + } + } + } + + &--default { + color: get-color(black-coral-7); + border-color: get-color(black-coral-7); + + #{$self} { + &__icon { + border-color: get-color(black-coral-7); + } + } + } + + &--success { + color: get-color(sea-green-10); + border-color: get-color(sea-green-10); + + #{$self} { + &__icon { + border-color: get-color(sea-green-10); + } + } + } + + &__icon { + display: flex; + align-items: center; + justify-content: center; + position: absolute; + font-size: 13px; + line-height: 15px; + right: -8px; + top: 4px; + width: 16px; + height: 16px; + border-radius: 50%; + border: 2px solid; + background-color: get-color(white); + } +} diff --git a/GUI/src/components/Label/index.tsx b/GUI/src/components/Label/index.tsx new file mode 100644 index 00000000..e27d0d4f --- /dev/null +++ b/GUI/src/components/Label/index.tsx @@ -0,0 +1,40 @@ +import { forwardRef, PropsWithChildren, ReactNode } from 'react'; +import clsx from 'clsx'; +import { MdOutlineCheck } from 'react-icons/md'; + +import { Tooltip } from 'components'; +import './Label.scss'; + 
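+// Bordered status pill; the optional tooltip trigger is the small icon pinned to its corner.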
+type LabelProps = { + type?: 'warning' | 'error' | 'info' | 'success' | 'default'; + tooltip?: ReactNode; +} + +const Label = forwardRef>(( + { + type = 'default', + tooltip, + children, + }, ref, +) => { + const labelClasses = clsx( + 'label', + `label--${type}`, + tooltip && 'label--tooltip', + ); + + return ( + + {children} + {tooltip && ( + + + {type === 'success' ? : 'i'} + + + )} + + ); +}); + +export default Label; diff --git a/GUI/src/components/LabelChip/index.scss b/GUI/src/components/LabelChip/index.scss new file mode 100644 index 00000000..ed40b04a --- /dev/null +++ b/GUI/src/components/LabelChip/index.scss @@ -0,0 +1,23 @@ +.label-chip { + display: inline-flex; + align-items: center; + justify-content: center; + padding: 6px 20px; + border-radius: 16px; + background-color: #e0e0e0; + margin: 4px; + gap: 7px; +} + +.label-chip .label { + margin-right: 8px; +} + +.label-chip .button { + background: none; + border: none; + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; +} \ No newline at end of file diff --git a/GUI/src/components/LabelChip/index.tsx b/GUI/src/components/LabelChip/index.tsx new file mode 100644 index 00000000..146e80c1 --- /dev/null +++ b/GUI/src/components/LabelChip/index.tsx @@ -0,0 +1,25 @@ +import React from 'react'; +import './index.scss'; +import { MdClose } from 'react-icons/md'; + +type LabelChipProps = { + label: string; + onRemove: () => void; +}; + +const LabelChip: React.FC = ({ label, onRemove }) => { + return ( +
+    <div className="label-chip">
+      <span className="label">{label}</span>
+      <button type="button" className="button" onClick={onRemove} aria-label="Remove">
+        <MdClose />
+      </button>
+    </div>
+ ); +}; + +export default LabelChip; diff --git a/GUI/src/components/Layout/Layout.scss b/GUI/src/components/Layout/Layout.scss new file mode 100644 index 00000000..08099c6a --- /dev/null +++ b/GUI/src/components/Layout/Layout.scss @@ -0,0 +1,28 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; + +.layout { + height: 100%; + display: flex; + + &__wrapper { + flex: 1; + display: flex; + flex-direction: column; + position: relative; + } + + &__main { + flex: 1; + display: flex; + flex-direction: column; + overflow-x: hidden; + gap: get-spacing(haapsalu); + padding: get-spacing(haapsalu); + position: absolute; + top: 100px; + left: 0; + right: 0; + bottom: 0; + } +} diff --git a/GUI/src/components/Layout/index.tsx b/GUI/src/components/Layout/index.tsx new file mode 100644 index 00000000..c26eca42 --- /dev/null +++ b/GUI/src/components/Layout/index.tsx @@ -0,0 +1,23 @@ +import { FC } from 'react'; +import { Outlet } from 'react-router-dom'; +import useStore from 'store'; +import './Layout.scss'; +import { useToast } from '../../hooks/useToast'; +import Header from 'components/Header'; +import MainNavigation from 'components/MainNavigation'; + +const Layout: FC = () => { + return ( +
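+    // App shell: fixed sidebar and header, routed pages render into the main area.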
+    <div className="layout">
+      <MainNavigation />
+      <div className="layout__wrapper">
+        {/* Header typing expects toastContext/user, but the component reads the store itself */}
+        <Header />
+        <main className="layout__main">
+          <Outlet />
+        </main>
+      </div>
+    </div>
+ ); +}; + +export default Layout; diff --git a/GUI/src/components/MainNavigation/MainNavigation.scss b/GUI/src/components/MainNavigation/MainNavigation.scss new file mode 100644 index 00000000..93b2556d --- /dev/null +++ b/GUI/src/components/MainNavigation/MainNavigation.scss @@ -0,0 +1,130 @@ +@import '@buerokratt-ria/styles/styles/tools/spacing'; +@import '@buerokratt-ria/styles/styles/tools/color'; +@import '@buerokratt-ria/styles/styles/settings/variables/typography'; + +.nav { + $self: &; + width: 208px; + background-color: get-color(sapphire-blue-10); + overflow: auto; + scrollbar-width: none; + transition: width .1s ease-out; + z-index: 100; + + &::-webkit-scrollbar { + display: none; + } + + li, a, .nav__toggle, .nav__menu-toggle { + font-size: 14px; + line-height: 1.5; + } + + &__menu-toggle { + display: flex; + align-items: center; + + &:hover { + background-color: get-color(sapphire-blue-8); + } + + &:active { + background-color: get-color(sapphire-blue-7); + } + } + + a, .nav__toggle { + width: 100%; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + color: get-color(black-coral-0); + padding: 14px 8px 14px 32px; + box-shadow: inset 0 -1px 0 get-color(sapphire-blue-14); + + span:not(.icon) { + flex: 1; + display: block; + } + + &:hover { + background-color: get-color(sapphire-blue-8); + } + + &:active { + background-color: #2E78B3; + } + + &.active { + background-color: #2E78B3; + font-weight: 700; + } + } + + &__toggle { + &[aria-expanded=true] { + font-weight: 700; + + .icon { + transform: rotate(180deg); + } + + + ul { + display: block; + } + } + + &.nav__toggle--icon { + + .icon:first-child { + transform: none; + } + } + } + + &__toggle-icon { + margin-left: auto; + } + + &__menu-toggle { + display: flex; + align-items: center; + gap: get-spacing(paldiski); + width: 100%; + color: get-color(white); + padding: 14px 8px; + box-shadow: inset 0 -1px 0 get-color(sapphire-blue-14); + } + + &__submenu { + display: none; + + a, .nav__toggle { + background-color: get-color(sapphire-blue-14); + box-shadow: inset 0 -1px 0 get-color(sapphire-blue-17); + } + + #{$self} { + &__submenu { + a { + background-color: get-color(sapphire-blue-17); + box-shadow: inset 0 -1px 0 get-color(black); + padding: 14px 48px 14px 40px; + } + } + } + } +} + +.collapsed { + .nav__submenu { + visibility: hidden; + height: 0; + } + + button[aria-expanded=true] { + .icon { + transform: rotate(0deg); + } + } +} diff --git a/GUI/src/components/MainNavigation/index.tsx b/GUI/src/components/MainNavigation/index.tsx new file mode 100644 index 00000000..58425ca9 --- /dev/null +++ b/GUI/src/components/MainNavigation/index.tsx @@ -0,0 +1,140 @@ +import { FC, MouseEvent, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { NavLink, useLocation } from 'react-router-dom'; +import { MdKeyboardArrowDown } from 'react-icons/md'; +import { useQuery } from '@tanstack/react-query'; +import clsx from 'clsx'; +import { Icon } from 'components'; +import type { MenuItem } from 'types/mainNavigation'; +import './MainNavigation.scss'; +import apiDev from 'services/api-dev'; +import { userManagementEndpoints } from 'utils/endpoints'; +import { integrationQueryKeys } from 'utils/queryKeys'; +import { ROLES } from 'enums/roles'; +import UserIcon from 'assets/UserIcon'; +import DatabaseIcon from 'assets/DatabaseIcon'; +import DataModelsIcon from 'assets/DataModelsIcon'; +import IncomingTextsIcon from 'assets/IncomingTextsIcon'; + +const MainNavigation: FC = () => { + const { t } = 
useTranslation(); + const [menuItems, setMenuItems] = useState([]); + + const items = [ + { + id: 'userManagement', + label: t('menu.userManagement'), + path: '/user-management', + icon: , + }, + { + id: 'agencies', + label: t('menu.agencies'), + path: '/integrated-agencies', + icon: + }, + { + id: 'dataSets', + label: t('menu.dataSets'), + path: '/datasets', + icon: , + }, + { + id: 'correctedTexts', + label: t('menu.correctedTexts'), + path: '/corrected-texts', + icon: , + } + ]; + + const filterItemsByRole = (role: string[], items: MenuItem[]) => { + return items?.filter((item) => { + if (role.includes(ROLES.ROLE_ADMINISTRATOR)) return item?.id; + else if (role.includes(ROLES.ROLE_MODEL_TRAINER)) + return item?.id !== 'userManagement' && item?.id !== 'integration'; + else return false; + }); + }; + + useQuery(integrationQueryKeys.USER_ROLES(), { + queryFn: async () => { + const res = await apiDev.get(userManagementEndpoints.FETCH_USER_ROLES()); + return res?.data?.response; + }, + onSuccess: (res) => { + const roles = res; + const filteredItems = filterItemsByRole(roles, items); + setMenuItems(filteredItems); + }, + onError: (error) => { + console.error('Error fetching user roles:', error); + }, + }); + const location = useLocation(); + const navCollapsed = false; + + const handleNavToggle = (event: MouseEvent) => { + const isExpanded = + event?.currentTarget?.getAttribute('aria-expanded') === 'true'; + event?.currentTarget?.setAttribute( + 'aria-expanded', + isExpanded ? 'false' : 'true' + ); + }; + + const renderMenuTree = (menuItems: MenuItem[]) => { + return menuItems?.map((menuItem) => ( +
+      <li key={menuItem.id}>
+        {menuItem?.children ? (
+          <>
+            <button className="nav__toggle" aria-expanded="false" onClick={handleNavToggle}>
+              {menuItem.icon && <Icon icon={menuItem.icon} />}
+              <span>{menuItem.label}</span>
+              <Icon icon={<MdKeyboardArrowDown />} />
+            </button>
+            <ul className="nav__submenu">
+              {renderMenuTree(menuItem?.children)}
+            </ul>
+          </>
+        ) : (
+          <NavLink to={menuItem.path ?? '#'}>
+            {menuItem.icon && <Icon icon={menuItem.icon} />}{' '}
+            {menuItem?.label}
+          </NavLink>
+        )}
+      </li>
  • + )); + }; + + if (!menuItems) return null; + + return ( + + ); +}; + +export default MainNavigation; \ No newline at end of file diff --git a/GUI/src/components/Popover/Popover.scss b/GUI/src/components/Popover/Popover.scss new file mode 100644 index 00000000..9278c909 --- /dev/null +++ b/GUI/src/components/Popover/Popover.scss @@ -0,0 +1,15 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/typography'; + +.popover { + background-color: get-color(white); + padding: 4px; + border-radius: 4px; + filter: drop-shadow(0px 0px 20px rgba(0, 0, 0, 0.25)); + font-size: $veera-font-size-80; + + &__arrow { + fill: get-color(white); + } +} diff --git a/GUI/src/components/Popover/index.tsx b/GUI/src/components/Popover/index.tsx new file mode 100644 index 00000000..929015bd --- /dev/null +++ b/GUI/src/components/Popover/index.tsx @@ -0,0 +1,27 @@ +import { FC, PropsWithChildren, ReactNode } from 'react'; +import * as RadixPopover from '@radix-ui/react-popover'; + +import './Popover.scss'; + +type PopoverProps = { + content: ReactNode; + defaultOpen?: boolean; +} + +const Popover: FC> = ({ children, content, defaultOpen = false }) => { + return ( + + + {children} + + + + {content} + + + + + ); +}; + +export default Popover; diff --git a/GUI/src/components/ProgressBar/index.scss b/GUI/src/components/ProgressBar/index.scss new file mode 100644 index 00000000..bc4f3a53 --- /dev/null +++ b/GUI/src/components/ProgressBar/index.scss @@ -0,0 +1,28 @@ +.progress-bar-container { + width: 100%; + display: flex; + flex-direction: column; + align-items: center; + } + + .progress-bar-label { + margin-bottom: 4px; + font-size: 14px; + } + + .progress-bar-root { + position: relative; + overflow: hidden; + background-color: #e0e0e0; + border-radius: 4px; + width: 100%; + height: 10px; + } + + .progress-bar-indicator { + background-color: #07478d; + height: 100%; + transition: width 0.3s; + border-radius: 20px; + } + \ No newline at end of file diff --git a/GUI/src/components/ProgressBar/index.tsx b/GUI/src/components/ProgressBar/index.tsx new file mode 100644 index 00000000..69d6a444 --- /dev/null +++ b/GUI/src/components/ProgressBar/index.tsx @@ -0,0 +1,26 @@ +import React from 'react'; +import * as Progress from '@radix-ui/react-progress'; +import './index.scss'; + +type ProgressBarProps = { + value: number; + max: number; + label?: string; +}; + +const ProgressBar: React.FC = ({ value, max, label }) => { + return ( +
+    <div className="progress-bar-container">
+      <Progress.Root className="progress-bar-root" value={value} max={max}>
+        <Progress.Indicator
+          className="progress-bar-indicator"
+          style={{ width: `${(value / max) * 100}%` }}
+        />
+      </Progress.Root>
+      {label && <div className="progress-bar-label">{label}</div>}
+    </div>
    + ); +}; + +export default ProgressBar; diff --git a/GUI/src/components/Section/Section.scss b/GUI/src/components/Section/Section.scss new file mode 100644 index 00000000..cdbb136e --- /dev/null +++ b/GUI/src/components/Section/Section.scss @@ -0,0 +1,11 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/typography'; + +.section { + padding: get-spacing(haapsalu); + + &:not(:last-child) { + border-bottom: 1px solid get-color(black-coral-2); + } +} diff --git a/GUI/src/components/Section/index.tsx b/GUI/src/components/Section/index.tsx new file mode 100644 index 00000000..7ecd131d --- /dev/null +++ b/GUI/src/components/Section/index.tsx @@ -0,0 +1,13 @@ +import { forwardRef, PropsWithChildren } from 'react'; + +import './Section.scss'; + +const Section = forwardRef(({ children }, ref) => { + return ( +
+    <section className="section">
+      {children}
+    </section>
    + ); +}); + +export default Section; diff --git a/GUI/src/components/Toast/Toast.scss b/GUI/src/components/Toast/Toast.scss new file mode 100644 index 00000000..fd340916 --- /dev/null +++ b/GUI/src/components/Toast/Toast.scss @@ -0,0 +1,73 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.toast { + padding: 16px; + border-radius: 5px; + border: 1px solid; + display: flex; + flex-direction: column; + gap: 8px; + position: relative; + transition: opacity 0.25s ease-out; + + &__title { + display: flex; + align-items: center; + gap: 8px; + padding-right: 25px; + } + + &__list { + position: fixed; + bottom: 0; + right: 0; + display: flex; + flex-direction: column; + gap: 16px; + padding: 8px; + width: 408px; + max-width: 100vw; + z-index: 9999; + list-style: none; + } + + &__content { + font-size: $veera-font-size-80; + + a { + display: inline; + color: get-color(sapphire-blue-10); + text-decoration: underline; + } + } + + &__close { + position: absolute; + top: 16px; + right: 16px; + font-size: 20px; + } + + &--success { + border-color: get-color(sea-green-10); + background-color: get-color(sea-green-0); + } + + &--info { + border-color: get-color(sapphire-blue-10); + background-color: get-color(sapphire-blue-1); + } + + &--error { + border-color: get-color(jasper-10); + background-color: #FCEEEE; + } + + &--warning { + border-color: get-color(dark-tangerine-10); + background-color: get-color(dark-tangerine-1); + } +} diff --git a/GUI/src/components/Toast/index.tsx b/GUI/src/components/Toast/index.tsx new file mode 100644 index 00000000..ffa29f61 --- /dev/null +++ b/GUI/src/components/Toast/index.tsx @@ -0,0 +1,54 @@ +import { FC, useState } from 'react'; +import * as RadixToast from '@radix-ui/react-toast'; +import { + MdOutlineClose, + MdOutlineInfo, + MdCheckCircleOutline, + MdOutlineWarningAmber, + MdErrorOutline, +} from 'react-icons/md'; +import clsx from 'clsx'; + +import { Icon } from 'components'; +import type { ToastType } from 'context/ToastContext'; +import './Toast.scss'; + +type ToastProps = { + toast: ToastType; + close: () => void; +}; + +const toastIcons = { + info: , + success: , + warning: , + error: , +}; + +const Toast: FC = ({ toast, close }) => { + const [open, setOpen] = useState(true); + + const toastClasses = clsx('toast', `toast--${toast.type}`); + + return ( + + + + {toast.title} + + + {toast.message} + + + } size="medium" /> + + + ); +}; + +export default Toast; diff --git a/GUI/src/components/Tooltip/Tooltip.scss b/GUI/src/components/Tooltip/Tooltip.scss new file mode 100644 index 00000000..bd062f75 --- /dev/null +++ b/GUI/src/components/Tooltip/Tooltip.scss @@ -0,0 +1,16 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/typography'; + +.tooltip { + background-color: get-color(white); + padding: 4px; + border-radius: 4px; + filter: drop-shadow(0px 0px 20px rgba(0, 0, 0, 0.25)); + font-size: $veera-font-size-80; + max-width: 50vw; + + &__arrow { + fill: get-color(white); + } +} diff --git a/GUI/src/components/Tooltip/index.tsx b/GUI/src/components/Tooltip/index.tsx new file mode 100644 index 00000000..3cd41ac2 --- /dev/null +++ b/GUI/src/components/Tooltip/index.tsx @@ -0,0 +1,28 @@ +import { FC, PropsWithChildren, ReactNode } from 'react'; +import * as RadixTooltip from '@radix-ui/react-tooltip'; + +import './Tooltip.scss'; + +type TooltipProps = { + content: 
ReactNode; +} + +const Tooltip: FC> = ({ content, children }) => { + return ( + + + + {children} + + + + {content} + + + + + + ); +}; + +export default Tooltip; diff --git a/GUI/src/components/Track/index.tsx b/GUI/src/components/Track/index.tsx new file mode 100644 index 00000000..2b66b6e7 --- /dev/null +++ b/GUI/src/components/Track/index.tsx @@ -0,0 +1,57 @@ +import { FC, HTMLAttributes, PropsWithChildren } from 'react'; + +type TrackProps = HTMLAttributes & { + gap?: number; + align?: 'left' | 'center' | 'right' | 'stretch'; + justify?: 'start' | 'between' | 'center' | 'around' | 'end'; + direction?: 'horizontal' | 'vertical'; + isMultiline?: boolean; +} + +const alignMap = { + left: 'flex-start', + center: 'center', + right: 'flex-end', + stretch: 'stretch', +}; + +const justifyMap = { + start: 'flex-start', + between: 'space-between', + center: 'center', + around: 'space-around', + end: 'flex-end', +}; + +const Track: FC> = ( + { + gap = 0, + align = 'center', + justify = 'start', + direction = 'horizontal', + isMultiline = false, + children, + style, + ...rest + }, +) => { + return ( +
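+    // Flexbox shorthand: align/justify props map to CSS values via the tables above.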
+    <div
+      style={{
+        display: 'flex',
+        gap: `${gap}px`,
+        alignItems: alignMap[align],
+        justifyContent: justifyMap[justify],
+        flexDirection: direction === 'horizontal' ? 'row' : 'column',
+        flexWrap: isMultiline ? 'wrap' : undefined,
+        ...style,
+      }}
+      {...rest}
+    >
+      {children}
+    </div>
    + ); +}; + +export default Track; diff --git a/GUI/src/components/index.tsx b/GUI/src/components/index.tsx new file mode 100644 index 00000000..5bb3b36f --- /dev/null +++ b/GUI/src/components/index.tsx @@ -0,0 +1,55 @@ +import Layout from './Layout'; +import Button from './Button'; +import Icon from './Icon'; +import Track from './Track'; +import { + FormInput, + FormTextarea, + FormSelect, + FormMultiselect, + Switch, + FormCheckboxes, + FormRadios, + FormCheckbox, + FormDatepicker, + SwitchBox, +} from './FormElements'; +import DataTable from './DataTable'; +import Tooltip from './Tooltip'; +import Card from './Card'; +import Label from './Label'; +import Toast from './Toast'; +import Popover from './Popover'; +import Collapsible from './Collapsible'; +import Box from './Box'; +import Drawer from './Drawer'; +import Dialog from './Dialog'; +import Section from './Section'; + +export { + Layout, + Button, + Icon, + Track, + Tooltip, + DataTable, + FormInput, + FormTextarea, + FormSelect, + FormMultiselect, + FormDatepicker, + Switch, + SwitchBox, + Card, + Label, + Toast, + FormCheckboxes, + FormRadios, + FormCheckbox, + Popover, + Collapsible, + Box, + Drawer, + Dialog, + Section, +}; diff --git a/GUI/src/components/molecules/CircularSpinner/CircularSpinner.tsx b/GUI/src/components/molecules/CircularSpinner/CircularSpinner.tsx new file mode 100644 index 00000000..60eaa8ad --- /dev/null +++ b/GUI/src/components/molecules/CircularSpinner/CircularSpinner.tsx @@ -0,0 +1,19 @@ +import React from 'react'; +import './Spinner.scss'; + +interface SpinnerProps { + size?: number; +} + +const CircularSpinner: React.FC = ({ size = 80 }) => { + return ( +
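+    // Pure-CSS spinner; "size" sets the square's width and height in pixels.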
+    <div className="spinner-container">
+      <div className="spinner" style={{ width: size, height: size }}></div>
+    </div>
    + ); +}; + +export default CircularSpinner; \ No newline at end of file diff --git a/GUI/src/components/molecules/CircularSpinner/Spinner.scss b/GUI/src/components/molecules/CircularSpinner/Spinner.scss new file mode 100644 index 00000000..d2297dea --- /dev/null +++ b/GUI/src/components/molecules/CircularSpinner/Spinner.scss @@ -0,0 +1,23 @@ +.spinner-container { + display: flex; + justify-content: center; + align-items: center; + height: 80vh; + } + + .spinner { + border: 4px solid rgba(0, 0, 0, 0.1); + border-top: 4px solid #3498db; + border-radius: 50%; + animation: spin 1s linear infinite; + } + + @keyframes spin { + 0% { + transform: rotate(0deg); + } + 100% { + transform: rotate(360deg); + } + } + \ No newline at end of file diff --git a/GUI/src/components/molecules/DatasetGroupCard/DatasetGroupCard.scss b/GUI/src/components/molecules/DatasetGroupCard/DatasetGroupCard.scss new file mode 100644 index 00000000..a81ab511 --- /dev/null +++ b/GUI/src/components/molecules/DatasetGroupCard/DatasetGroupCard.scss @@ -0,0 +1,91 @@ +.row { + margin: 10px 0; + display: flex; + align-items: center; +} + +.switch-row { + justify-content: space-between; +} + +.status-indicators { + display: flex; + align-items: center; + gap: 10px; +} + +.dot { + width: 10px; + height: 10px; + border-radius: 50%; + margin-right: 8px; +} + +.green { + background-color: green; +} + +.grey { + background-color: grey; +} + +.icon-text-row { + flex-direction: column; + justify-content: center; + text-align: center; +} + +.icon-image { + width: 50px; + height: 50px; +} + +.icon-text { + margin: 5px 0 0 0; + font-size: 16px; +} + +.label-row { + justify-content: flex-end; + display: flex; + align-items: center; + margin-top: 15px; +} + +.left-label { + font-size: 14px; + color: #333; +} + +.status { + display: flex; + align-items: center; + font-size: 12px; + color: #555; + bottom: 10px; + left: 20px; +} + +.colored-label{ + background-color: white; + border: solid 2px #D73E3E; + border-radius: 6px; + color: #D73E3E; + width: fit-content; + padding: 0px 5px; + font-size: 14px; + margin-right: 5px; +} + +.text{ + font-size: 16px; + margin-bottom: 5px; +} + +.py-3{ + padding: 8px 0px; +} + +.flex{ + display: flex; +} \ No newline at end of file diff --git a/GUI/src/components/molecules/DatasetGroupCard/index.tsx b/GUI/src/components/molecules/DatasetGroupCard/index.tsx new file mode 100644 index 00000000..2fd8b479 --- /dev/null +++ b/GUI/src/components/molecules/DatasetGroupCard/index.tsx @@ -0,0 +1,144 @@ +import { FC, PropsWithChildren } from 'react'; +import './DatasetGroupCard.scss'; +import { Switch } from 'components/FormElements'; +import Button from 'components/Button'; +import Label from 'components/Label'; +import { useMutation, useQueryClient } from '@tanstack/react-query'; +import { enableDataset } from 'services/datasets'; +import { useDialog } from 'hooks/useDialog'; +import { Operation } from 'types/datasetGroups'; +import { datasetQueryKeys } from 'utils/queryKeys'; +import { DatasetViewEnum } from 'enums/datasetEnums'; +import { ButtonAppearanceTypes, LabelType } from 'enums/commonEnums'; +import { useTranslation } from 'react-i18next'; +import { formatDate } from 'utils/commonUtilts'; +import DatasetValidationStatus from '../ValidationStatus/ValidationStatus'; + +type DatasetGroupCardProps = { + datasetGroupId: number; + datasetName?: string; + version?: string; + isLatest?: boolean; + isEnabled?: boolean; + lastUpdated?: Date | null; + lastUsed?: Date | null; + validationStatus?: string; + 
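+  // When a model was last trained on this dataset group (shown on the card).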
lastModelTrained?: Date | null; + setId?: React.Dispatch>; + setView?: React.Dispatch>; +}; + +const DatasetGroupCard: FC> = ({ + datasetGroupId, + datasetName, + version, + isLatest, + isEnabled, + lastUpdated, + lastUsed, + validationStatus, + lastModelTrained, + setId, + setView, +}) => { + const queryClient = useQueryClient(); + const { open } = useDialog(); + const { t } = useTranslation(); + + const datasetEnableMutation = useMutation({ + mutationFn: (data: Operation) => enableDataset(data), + onSuccess: async () => { + await queryClient.invalidateQueries(datasetQueryKeys.DATASET_OVERVIEW(1)); + }, + onError: () => { + open({ + title: t('datasetGroups.modals.enableDatasetTitle'), + content:
+          <p>{t('datasetGroups.modals.enableDatasetDesc')}</p>
    , + }); + }, + }); + + const datasetDisableMutation = useMutation({ + mutationFn: (data: Operation) => enableDataset(data), + onSuccess: async (response) => { + await queryClient.invalidateQueries(datasetQueryKeys.DATASET_OVERVIEW(1)); + if (response?.operationSuccessful) + open({ + title: t('datasetGroups.modals.enableDatasetTitle'), + content:
+            <p>{t('datasetGroups.modals.enableDatasetDesc')}</p>
    , + }); + }, + onError: () => { + open({ + title: t('datasetGroups.modals.errorTitle'), + content:
+          <p>{t('datasetGroups.modals.errorDesc')}</p>
    , + }); + }, + }); + + const handleCheck = () => { + if (isEnabled) + datasetDisableMutation.mutate({ + dgId: datasetGroupId, + operationType: 'disable', + }); + else + datasetEnableMutation.mutate({ + dgId: datasetGroupId, + operationType: 'enable', + }); + }; + + return ( +
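+    // Card layout: name + enable switch, validation status, timestamps, version labels, actions.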
+    <div>
+      <div className="row switch-row">
+        <h3 className="text">{datasetName}</h3>
+        <Switch label="" checked={isEnabled} onCheckedChange={() => handleCheck()} />
+      </div>
+      <DatasetValidationStatus status={validationStatus} />
+      <div className="py-3">
+        <p className="text">
+          {t('datasetGroups.datasetCard.lastModelTrained')}:{' '}
+          {lastModelTrained && formatDate(lastModelTrained, 'DD.MM.yy-HH:mm')}
+        </p>
+        <p className="text">
+          {t('datasetGroups.datasetCard.lastUsedForTraining')}:{' '}
+          {lastUsed && formatDate(lastUsed, 'DD.MM.yy-HH:mm')}
+        </p>
+        <p className="text">
+          {t('datasetGroups.datasetCard.lastUpdate')}:{' '}
+          {lastUpdated && formatDate(lastUpdated, 'DD.MM.yy-HH:mm')}
+        </p>
+      </div>
+      <div className="label-row">
+        {version && <Label type={LabelType.SUCCESS}>{version}</Label>}
+        {isLatest ? <Label type={LabelType.SUCCESS}>latest</Label> : null}
+      </div>
+      <div className="label-row">
+        <Button
+          appearance={ButtonAppearanceTypes.PRIMARY}
+          onClick={() => {
+            setId?.(datasetGroupId);
+            setView?.(DatasetViewEnum.INDIVIDUAL);
+          }}
+        >
+          Settings
+        </Button>
+      </div>
+    </div>
    + ); +}; + +export default DatasetGroupCard; diff --git a/GUI/src/components/molecules/NoDataView/NoDataView.scss b/GUI/src/components/molecules/NoDataView/NoDataView.scss new file mode 100644 index 00000000..e6bde393 --- /dev/null +++ b/GUI/src/components/molecules/NoDataView/NoDataView.scss @@ -0,0 +1,7 @@ +.p-5 { + padding: 5rem; +} + +.text-grey { + color: grey; +} diff --git a/GUI/src/components/molecules/NoDataView/index.tsx b/GUI/src/components/molecules/NoDataView/index.tsx new file mode 100644 index 00000000..c5f2bc8a --- /dev/null +++ b/GUI/src/components/molecules/NoDataView/index.tsx @@ -0,0 +1,24 @@ +import React from 'react'; +import { MdDashboard } from 'react-icons/md'; +import './NoDataView.scss'; +interface NoDataViewProps { + text?: string; + description?: string; +} + +const NoDataView: React.FC = ({ text, description }) => { + return ( +
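+    // Empty-state placeholder: a dashboard icon with optional text and description.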
+    <div className="p-5 text-grey">
+      {<MdDashboard />}
+      <div>{text}</div>
+      <div>
+        <div>{description}</div>
+      </div>
+    </div>
    + ); +}; + +export default NoDataView; diff --git a/GUI/src/components/molecules/Pagination/Pagination.scss b/GUI/src/components/molecules/Pagination/Pagination.scss new file mode 100644 index 00000000..5c89eb88 --- /dev/null +++ b/GUI/src/components/molecules/Pagination/Pagination.scss @@ -0,0 +1,194 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/typography'; + +.data-table { + width: 100%; + color: get-color(black-coral-20); + text-align: left; + margin-bottom: 0; + display: table; + + &__scrollWrapper { + height: 100%; + overflow-x: auto; + white-space: nowrap; + display: block; + padding: 5px; + background-color: white; + border-radius: 10px; + border: solid 1px get-color(black-coral-1); + } + + thead, + tbody { + width: 100%; + } + + th { + padding: 12px 14.5px; + color: get-color(black-coral-12); + border-bottom: 1px solid get-color(black-coral-10); + font-weight: $veera-font-weight-beta; + vertical-align: middle; + position: relative; + } + + td { + padding: 12px 24px 12px 16px; + border-bottom: 1px solid get-color(black-coral-2); + vertical-align: middle; + max-width: fit-content; + + p { + white-space: break-spaces; + } + + .entity { + display: inline-flex; + align-items: center; + padding-left: 4px; + background-color: get-color(sapphire-blue-2); + border-radius: 4px; + + span { + display: inline-flex; + font-size: $veera-font-size-80; + background-color: get-color(white); + padding: 0 4px; + border-radius: 4px; + margin: 2px 2px 2px 4px; + } + } + } + + tbody { + tr { + &:last-child { + td { + border-bottom: 0; + } + } + } + } + + &__filter { + position: absolute; + top: 100%; + left: 0; + right: 0; + padding: get-spacing(paldiski); + background-color: get-color(white); + border-radius: 0 0 4px 4px; + border: 1px solid get-color(black-coral-2); + + input { + width: 100%; + display: block; + appearance: none; + background-color: get-color(white); + border: 1px solid get-color(black-coral-6); + border-radius: 5px; + color: var(--color-black); + font-size: $veera-font-size-100; + height: 32px; + line-height: 24px; + padding: get-spacing(paldiski); + + &::placeholder { + color: get-color(black-coral-6); + } + + &:focus { + outline: none; + border-color: get-color(sapphire-blue-10); + } + } + } + + &__pagination-wrapper { + display: flex; + padding: 6px 16px; + } + + &__pagination { + display: flex; + align-items: center; + gap: 15px; + margin: 0 auto; + + + .data-table__page-size { + margin-left: 0; + } + + .next, + .previous { + display: flex; + color: get-color(sapphire-blue-10); + + &[disabled] { + color: get-color(black-coral-11); + cursor: initial; + } + } + + .links { + display: flex; + align-items: center; + gap: 5px; + font-size: $veera-font-size-80; + color: get-color(black-coral-10); + + li { + display: block; + + a, + span { + display: flex; + align-items: center; + justify-content: center; + width: 25px; + height: 25px; + border-radius: 50%; + + &:hover { + text-decoration: none; + } + } + + &.active { + a, + span { + color: get-color(white); + background-color: get-color(sapphire-blue-10); + } + } + } + } + } + + &__page-size { + display: flex; + align-items: center; + gap: 8px; + font-size: $veera-font-size-80; + line-height: 16px; + color: get-color(black-coral-11); + margin-left: auto; + + select { + appearance: none; + font-size: $veera-font-size-70; + line-height: 16px; + height: 30px; + min-width: 50px; + padding: 6px 10px; + border: 1px solid #8f91a8; + border-radius: 2px; + background-color: 
get-color(white); + background-image: url('data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTAiIGhlaWdodD0iNiIgdmlld0JveD0iMCAwIDEwIDYiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxwYXRoIGZpbGwtcnVsZT0iZXZlbm9kZCIgY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNNS4zMTMwNiA1LjgwODIyQzUuMTU2ODUgNS45NjQ0MyA0LjkwMzU4IDUuOTY0NDMgNC43NDczNyA1LjgwODIyTDAuMjgyNzMgMS4zNDM1OEMwLjEyNjUyIDEuMTg3MzcgMC4xMjY1MiAwLjkzNDEwMiAwLjI4MjczIDAuNzc3ODkzTDAuNzc3NzA0IDAuMjgyOTE4QzAuOTMzOTE0IDAuMTI2NzA4IDEuMTg3MTggMC4xMjY3MDggMS4zNDMzOSAwLjI4MjkxN0w1LjAzMDIyIDMuOTY5NzRMOC43MTcwNCAwLjI4MjkxN0M4Ljg3MzI1IDAuMTI2NzA4IDkuMTI2NTIgMC4xMjY3MDggOS4yODI3MyAwLjI4MjkxN0w5Ljc3NzcgMC43Nzc4OTJDOS45MzM5MSAwLjkzNDEwMiA5LjkzMzkxIDEuMTg3MzcgOS43Nzc3IDEuMzQzNThMNS4zMTMwNiA1LjgwODIyWiIgZmlsbD0iIzU1NTg2NyIvPgo8L3N2Zz4K'); + background-repeat: no-repeat; + background-position: top 11px right 10px; + } + } +} diff --git a/GUI/src/components/molecules/Pagination/index.tsx b/GUI/src/components/molecules/Pagination/index.tsx new file mode 100644 index 00000000..7c1c3b9d --- /dev/null +++ b/GUI/src/components/molecules/Pagination/index.tsx @@ -0,0 +1,66 @@ +import React from 'react'; +import { MdOutlineWest, MdOutlineEast } from 'react-icons/md'; +import clsx from 'clsx'; +import { Link } from 'react-router-dom'; + +interface PaginationProps { + pageCount: number; + pageIndex: number; + canPreviousPage: boolean; + canNextPage: boolean; + onPageChange: (pageIndex: number) => void; + id?: string; +} + +const Pagination: React.FC = ({ + pageCount, + pageIndex, + canPreviousPage, + canNextPage, + onPageChange, + id, +}) => { + return ( +
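+    // Page links render only when there is more than one page.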
+    <div className="data-table__pagination-wrapper" id={id}>
+      {pageCount > 1 && (
+        <div className="data-table__pagination">
+          <button className="previous" disabled={!canPreviousPage} onClick={() => onPageChange(pageIndex - 1)}>
+            <MdOutlineWest />
+          </button>
+          <ul className="links">
+            {Array.from({ length: pageCount }, (_, i) => (
+              <li key={i} className={clsx({ active: i === pageIndex })}>
+                <Link to="#" onClick={(e) => { e.preventDefault(); onPageChange(i); }}>
+                  {i + 1}
+                </Link>
+              </li>
+            ))}
+          </ul>
+          <button className="next" disabled={!canNextPage} onClick={() => onPageChange(pageIndex + 1)}>
+            <MdOutlineEast />
+          </button>
+        </div>
+      )}
+    </div>
    + ); +}; + +export default Pagination; diff --git a/GUI/src/components/molecules/TableSkeleton/SkeletonTable.scss b/GUI/src/components/molecules/TableSkeleton/SkeletonTable.scss new file mode 100644 index 00000000..5f433ecf --- /dev/null +++ b/GUI/src/components/molecules/TableSkeleton/SkeletonTable.scss @@ -0,0 +1,31 @@ +.skeleton { + display: inline-block; + height: 1.5rem; + width: 100%; + background-color: #e0e0e0; + border-radius: 4px; + animation: pulse 1.5s infinite ease-in-out; + } + + @keyframes pulse { + 0% { + background-color: #e0e0e0; + } + 50% { + background-color: #f0f0f0; + } + 100% { + background-color: #e0e0e0; + } + } + + .table { + width: 100%; + border-collapse: collapse; + } + + .table th, + .table td { + padding: 0.75rem; + text-align: left; + } \ No newline at end of file diff --git a/GUI/src/components/molecules/TableSkeleton/TableSkeleton.tsx b/GUI/src/components/molecules/TableSkeleton/TableSkeleton.tsx new file mode 100644 index 00000000..b8a23d3f --- /dev/null +++ b/GUI/src/components/molecules/TableSkeleton/TableSkeleton.tsx @@ -0,0 +1,24 @@ +import React from 'react'; +import './SkeletonTable.scss'; + +interface SkeletonTableProps { + rowCount: number; +} + +const SkeletonTable: React.FC = ({ rowCount }) => { + const skeletonRows = Array.from({ length: rowCount }, (_, index) => ( + + +
+    <tr key={index}>
+      <td>
+        <div className="skeleton"></div>
+      </td>
+    </tr>
+  ));
+
+  return (
+    <table className="table">
+      <tbody>{skeletonRows}</tbody>
+    </table>
    + ); +}; + +export default SkeletonTable; \ No newline at end of file diff --git a/GUI/src/components/molecules/UserManagementActionButtons/UserManagementActionButtons.tsx b/GUI/src/components/molecules/UserManagementActionButtons/UserManagementActionButtons.tsx new file mode 100644 index 00000000..4af03ccf --- /dev/null +++ b/GUI/src/components/molecules/UserManagementActionButtons/UserManagementActionButtons.tsx @@ -0,0 +1,91 @@ +import { FC } from 'react'; +import Button from 'components/Button'; +import Icon from 'components/Icon'; +import { useTranslation } from 'react-i18next'; +import { MdOutlineDeleteOutline, MdOutlineEdit } from 'react-icons/md'; +import { User } from 'types/user'; +import { ButtonAppearanceTypes, ToastTypes } from 'enums/commonEnums'; +import { useDialog } from 'hooks/useDialog'; +import { deleteUser } from 'services/users'; +import { userManagementQueryKeys } from 'utils/queryKeys'; +import { useToast } from 'hooks/useToast'; +import { useMutation, useQueryClient } from '@tanstack/react-query'; +import { AxiosError } from 'axios'; + +const ActionButtons: FC<{ + row: User; + setEditableRow: React.Dispatch>; +}> = ({ row, setEditableRow }) => { + const { t } = useTranslation(); + const { open, close } = useDialog(); + const toast = useToast(); + const queryClient = useQueryClient(); + + const deleteUserMutation = useMutation({ + mutationFn: ({ id }: { id: string | number }) => deleteUser(id), + onSuccess: async () => { + close(); + await queryClient.invalidateQueries( + userManagementQueryKeys.getAllEmployees() + ); + toast.open({ + type: ToastTypes.SUCCESS, + title: t('global.notification'), + message: t('toast.success.userDeleted'), + }); + }, + onError: (error: AxiosError) => { + toast.open({ + type: ToastTypes.ERROR, + title: t('global.notificationError'), + message: error?.message ?? '', + }); + }, + }); + + return ( +
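+    // Edit hands the row to the parent form; Delete asks for confirmation first.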
+    <>
+      <Button appearance={ButtonAppearanceTypes.TEXT} onClick={() => setEditableRow(row)}>
+        <Icon icon={<MdOutlineEdit />} />
+        {t('global.edit')}
+      </Button>
+      <Button
+        appearance={ButtonAppearanceTypes.TEXT}
+        onClick={() => {
+          {/* translation keys and the "useridcode" identifier field are assumed */}
+          open({
+            title: t('userManagement.deleteUser') ?? '',
+            content: <p>{t('userManagement.deleteUserDesc')}</p>,
+            footer: (
+              <>
+                <Button appearance={ButtonAppearanceTypes.SECONDARY} onClick={close}>
+                  {t('global.cancel')}
+                </Button>
+                <Button
+                  appearance={ButtonAppearanceTypes.ERROR}
+                  onClick={() => deleteUserMutation.mutate({ id: row.useridcode })}
+                >
+                  {t('global.confirm')}
+                </Button>
+              </>
    + ), + }); + }} + > + } /> + {t('global.delete')} + + + ); +}; + +export default ActionButtons; diff --git a/GUI/src/config/rolesConfig.json b/GUI/src/config/rolesConfig.json new file mode 100644 index 00000000..02b429cd --- /dev/null +++ b/GUI/src/config/rolesConfig.json @@ -0,0 +1,4 @@ +[ + { "label": "ROLE_ADMINISTRATOR", "value": "ROLE_ADMINISTRATOR" }, + { "label": "ROLE_MODEL_TRAINER", "value": "ROLE_MODEL_TRAINER" } +] diff --git a/GUI/src/constants/config.ts b/GUI/src/constants/config.ts new file mode 100644 index 00000000..5c0855f4 --- /dev/null +++ b/GUI/src/constants/config.ts @@ -0,0 +1,5 @@ +export const EMERGENCY_NOTICE_LENGTH = 250; +export const WELCOME_MESSAGE_LENGTH = 250; +export const USER_IDLE_STATUS_TIMEOUT = 300000; // milliseconds +export const CHAT_INPUT_LENGTH = 500; +export const CHAT_HISTORY_PREFERENCES_KEY = 'chat-history-preferences'; diff --git a/GUI/src/constants/menuIcons.tsx b/GUI/src/constants/menuIcons.tsx new file mode 100644 index 00000000..a53fc7c9 --- /dev/null +++ b/GUI/src/constants/menuIcons.tsx @@ -0,0 +1,24 @@ +import { MdOutlineForum, MdOutlineAdb, MdOutlineEqualizer, MdSettings, MdOutlineMonitorWeight } from 'react-icons/md'; + +export const menuIcons = [ + { + id: 'userManagement', + icon: , + }, + { + id: 'training', + icon: , + }, + { + id: 'analytics', + icon: , + }, + { + id: 'settings', + icon: , + }, + { + id: 'monitoring', + icon: , + }, +]; diff --git a/GUI/src/context/DialogContext.tsx b/GUI/src/context/DialogContext.tsx new file mode 100644 index 00000000..f2b75c44 --- /dev/null +++ b/GUI/src/context/DialogContext.tsx @@ -0,0 +1,83 @@ +import React, { + createContext, + FC, + PropsWithChildren, + ReactNode, + useMemo, + useState, +} from 'react'; +import * as RadixDialog from '@radix-ui/react-dialog'; +import { MdOutlineClose } from 'react-icons/md'; +import clsx from 'clsx'; +import '../components/Dialog/Dialog.scss'; +import Icon from 'components/Icon'; +import Track from 'components/Track'; + +type DialogProps = { + title?: string | null; + footer?: ReactNode; + size?: 'default' | 'large'; + content: ReactNode; +}; + +type DialogContextType = { + open: (dialog: DialogProps) => void; + close: () => void; +}; +// operates Dialog modals where dynamic contents not involved +export const DialogContext = createContext(null!); + +export const DialogProvider: FC> = ({ children }) => { + const [isOpen, setIsOpen] = useState(false); + const [dialogProps, setDialogProps] = useState(null); + + const open = (dialog: DialogProps) => { + setDialogProps(dialog); + setIsOpen(true); + }; + + const close = () => { + setIsOpen(false); + setDialogProps(null); + }; + + const contextValue = useMemo(() => ({ open, close }), []); + + return ( + + {children} + {dialogProps && ( + + + + + {dialogProps.title && ( +
+            <div className="dialog__header">
+              <RadixDialog.Title className="h3 dialog__title">
+                {dialogProps.title}
+              </RadixDialog.Title>
+              <RadixDialog.Close asChild>
+                <button className="dialog__close" onClick={close}>
+                  <Icon icon={<MdOutlineClose />} size="medium" />
+                </button>
+              </RadixDialog.Close>
+            </div>
+          )}
+          <div className="dialog__body">{dialogProps.content}</div>
+          {dialogProps.footer && (
+            <Track className="dialog__footer" gap={16} justify="end">
+              {dialogProps.footer}
+            </Track>
+          )}
+        </RadixDialog.Content>
+      </RadixDialog.Portal>
+    </RadixDialog.Root>
+  )}
    + ); +}; diff --git a/GUI/src/context/ToastContext.tsx b/GUI/src/context/ToastContext.tsx new file mode 100644 index 00000000..5c07ef4a --- /dev/null +++ b/GUI/src/context/ToastContext.tsx @@ -0,0 +1,58 @@ +import { + createContext, + FC, + PropsWithChildren, + ReactNode, + useMemo, + useState, +} from 'react'; +import { useTranslation } from 'react-i18next'; +import * as RadixToast from '@radix-ui/react-toast'; + +import { Toast } from 'components'; +import { generateUEID } from 'utils/generateUEID'; + +export type ToastType = { + type: 'info' | 'success' | 'error' | 'warning'; + title: string; + message: ReactNode; +}; + +type ToastTypeWithId = ToastType & { id: string }; + +type ToastContextType = { + open: (toast: ToastType) => void; +}; + +export const ToastContext = createContext(null!); + +export const ToastProvider: FC = ({ children }) => { + const { t } = useTranslation(); + const [toasts, setToasts] = useState([]); + const open = (content: ToastType) => { + setToasts((prevState) => [ + ...prevState, + { id: generateUEID(), ...content }, + ]); + }; + const close = (id: string) => { + setToasts((prevState) => prevState.filter((toast) => toast.id === id)); + }; + + const contextValue = useMemo(() => ({ open }), []); + + return ( + + + {children} + {toasts.map((toast) => ( + close(toast.id)} /> + ))} + + + + ); +}; diff --git a/GUI/src/enums/commonEnums.ts b/GUI/src/enums/commonEnums.ts new file mode 100644 index 00000000..79f9444f --- /dev/null +++ b/GUI/src/enums/commonEnums.ts @@ -0,0 +1,18 @@ +export enum ToastTypes { + SUCCESS = 'success', + ERROR = 'error', +} + +export enum ButtonAppearanceTypes { + PRIMARY = 'primary', + SECONDARY = 'secondary', + ERROR = 'error', + TEXT = 'text', +} + +export enum LabelType { + SUCCESS = 'success', + ERROR = 'error', + INFO = 'info', + WARNING = 'warning', +} diff --git a/GUI/src/enums/correctedTextsEnums.ts b/GUI/src/enums/correctedTextsEnums.ts new file mode 100644 index 00000000..9ed55948 --- /dev/null +++ b/GUI/src/enums/correctedTextsEnums.ts @@ -0,0 +1,5 @@ +export enum CorrectedTextsModalContexts { + EXPORT = 'export', + SUCCESS = 'success', + ERROR = 'error' + } \ No newline at end of file diff --git a/GUI/src/enums/dataModelsEnums.ts b/GUI/src/enums/dataModelsEnums.ts new file mode 100644 index 00000000..e104a4b6 --- /dev/null +++ b/GUI/src/enums/dataModelsEnums.ts @@ -0,0 +1,31 @@ +export enum TrainingStatus { + NOT_TRAINED = 'not trained', + TRAINING_INPROGRESS = 'training in-progress', + TRAINED = 'trained', + RETRAINING_NEEDED = 'retraining needed', + UNTRAINABLE = 'untrainable', +} + +export enum Maturity { + PRODUCTION = 'production ready', + STAGING = 'staging', + DEVELOPMENT = 'development', + TESTING = 'testing', +} + +export enum Platform { + JIRA = 'jira', + OUTLOOK = 'outlook', + UNDEPLOYED = 'undeployed', +} + +export enum UpdateType { + MAJOR = 'major', + MINOR = 'minor', + MATURITY_LABEL = 'maturityLabel', +} + +export enum TrainingSessionsStatuses { + TRAINING_SUCCESS_STATUS = 'Model Trained And Deployed', + TRAINING_FAILED_STATUS = 'Training Failed' +} \ No newline at end of file diff --git a/GUI/src/enums/datasetEnums.ts b/GUI/src/enums/datasetEnums.ts new file mode 100644 index 00000000..1f33c83e --- /dev/null +++ b/GUI/src/enums/datasetEnums.ts @@ -0,0 +1,56 @@ +export enum ValidationStatus { + SUCCESS = 'success', + FAIL = 'fail', + UNVALIDATED = 'unvalidated', + IN_PROGRESS = 'in-progress', +} + +export enum DatasetViewEnum { + LIST = 'list', + INDIVIDUAL = 'individual', +} + +export enum 
+export enum CreateDatasetGroupModals {
+  SUCCESS = 'SUCCESS',
+  VALIDATION_ERROR = 'VALIDATION_ERROR',
+  NULL = 'NULL',
+}
+
+export enum ViewDatasetGroupModalContexts {
+  EXPORT_MODAL = 'EXPORT_MODAL',
+  IMPORT_MODAL = 'IMPORT_MODAL',
+  PATCH_UPDATE_MODAL = 'PATCH_UPDATE_MODAL',
+  DELETE_ROW_MODAL = 'DELETE_ROW_MODAL',
+  CONFIRMATION_MODAL = 'CONFIRMATION_MODAL',
+  NULL = 'NULL',
+}
+
+export enum UpdatePriority {
+  MAJOR = 'MAJOR',
+  MINOR = 'MINOR',
+  PATCH = 'PATCH',
+  NULL = 'NULL',
+}
+
+export enum ImportExportDataTypes {
+  XLSX = 'xlsx',
+  JSON = 'json',
+  YAML = 'yaml',
+}
+
+export enum StopWordImportOptions {
+  ADD = 'add',
+  DELETE = 'delete',
+}
+
+export enum ValidationErrorTypes {
+  NAME = 'NAME',
+  CLASS_HIERARCHY = 'CLASS_HIERARCHY',
+  VALIDATION_CRITERIA = 'VALIDATION_CRITERIA',
+  NULL = 'NULL',
+}
+
+export enum ValidationSessionsStatuses {
+  VALIDATION_SUCCESS_STATUS = 'Success',
+  VALIDATION_FAILED_STATUS = 'Fail'
+}
\ No newline at end of file

diff --git a/GUI/src/enums/roles.ts b/GUI/src/enums/roles.ts
new file mode 100644
index 00000000..b5cfd8a6
--- /dev/null
+++ b/GUI/src/enums/roles.ts
@@ -0,0 +1,4 @@
+export enum ROLES {
+  ROLE_ADMINISTRATOR = 'ROLE_ADMINISTRATOR',
+  ROLE_MODEL_TRAINER = 'ROLE_MODEL_TRAINER',
+}

diff --git a/GUI/src/hoc/with-authorization.tsx b/GUI/src/hoc/with-authorization.tsx
new file mode 100644
index 00000000..9874ffa9
--- /dev/null
+++ b/GUI/src/hoc/with-authorization.tsx
@@ -0,0 +1,29 @@
+import { ROLES } from 'enums/roles';
+import React from 'react';
+import useStore from 'store';
+
+function withAuthorization<P extends object>(
+  WrappedComponent: React.ComponentType<P>,
+  allowedRoles: ROLES[] = []
+): React.FC<P> {
+  const CheckRoles: React.FC<P> = ({ ...props }: P) => {
+    const userInfo = useStore((x) => x.userInfo);
+    const allowed = allowedRoles?.some((x) =>
+      userInfo?.authorities.includes(x)
+    );
+
+    if (!userInfo) {
+      return <>Loading...</>;
+    }
+
+    if (!allowed) {
+      return <>Unauthorized Access</>;
+    }
+
+    return <WrappedComponent {...props} />;
+  };
+
+  return CheckRoles;
+}
+
+export default withAuthorization;

diff --git a/GUI/src/hooks/useDialog.tsx b/GUI/src/hooks/useDialog.tsx
new file mode 100644
index 00000000..c38ed60a
--- /dev/null
+++ b/GUI/src/hooks/useDialog.tsx
@@ -0,0 +1,4 @@
+import { DialogContext } from 'context/DialogContext';
+import { useContext } from 'react';
+
+export const useDialog = () => useContext(DialogContext);

diff --git a/GUI/src/hooks/useDocumentEscapeListener.tsx b/GUI/src/hooks/useDocumentEscapeListener.tsx
new file mode 100644
index 00000000..8f7b3b6b
--- /dev/null
+++ b/GUI/src/hooks/useDocumentEscapeListener.tsx
@@ -0,0 +1,17 @@
+import { useLayoutEffect } from 'react';
+
+const useDocumentEscapeListener = (callback: () => void) => {
+  useLayoutEffect(() => {
+    const handleKeyUp = (event: KeyboardEvent) => {
+      if (event.key === 'Escape') {
+        callback();
+      }
+    };
+
+    document.addEventListener('keyup', handleKeyUp);
+
+    return () => document.removeEventListener('keyup', handleKeyUp);
+  }, [callback]);
+};
+
+export default useDocumentEscapeListener;

diff --git a/GUI/src/hooks/useOptionLists.tsx b/GUI/src/hooks/useOptionLists.tsx
new file mode 100644
index 00000000..f860f166
--- /dev/null
+++ b/GUI/src/hooks/useOptionLists.tsx
@@ -0,0 +1,26 @@
+import { useTranslation } from 'react-i18next';
+
+// maps translations with dropdown options
+const useOptionLists = () => {
+  const { t } = useTranslation();
+
+  const dataTypesConfigs = [
+    { label: t('optionLists.text'), value: 'text' },
+    { label: t('optionLists.numbers'), value: 'numbers' },
+    { label: t('optionLists.dateTimes'), value: 'datetime' },
+    { label: t('optionLists.email'), value: 'email' },
+    { label: t('optionLists.fileAttachements'), value: 'file_attachments' },
+  ];
+
+  const importOptionsConfigs = [
+    { label: t('optionLists.importToAdd'), value: 'add' },
+    { label: t('optionLists.importToDelete'), value: 'delete' },
+  ];
+
+  return {
+    dataTypesConfigs,
+    importOptionsConfigs,
+  };
+};
+
+export default useOptionLists;

diff --git a/GUI/src/hooks/useToast.tsx b/GUI/src/hooks/useToast.tsx
new file mode 100644
index 00000000..51715549
--- /dev/null
+++ b/GUI/src/hooks/useToast.tsx
@@ -0,0 +1,5 @@
+import { useContext } from 'react';
+
+import { ToastContext } from 'context/ToastContext';
+
+export const useToast = () => useContext(ToastContext);

diff --git a/GUI/src/main.tsx b/GUI/src/main.tsx
new file mode 100644
index 00000000..a44091f9
--- /dev/null
+++ b/GUI/src/main.tsx
@@ -0,0 +1,51 @@
+import React from 'react';
+import ReactDOM from 'react-dom/client';
+import { BrowserRouter } from 'react-router-dom';
+import {
+  QueryClient,
+  QueryClientProvider,
+  QueryFunction,
+} from '@tanstack/react-query';
+
+import App from './App';
+import api from 'services/api';
+import apiDev from 'services/api-dev';
+import { ToastProvider } from 'context/ToastContext';
+import 'styles/main.scss';
+import '../i18n';
+import { CookiesProvider } from 'react-cookie';
+import { DialogProvider } from 'context/DialogContext';
+
+const defaultQueryFn: QueryFunction | undefined = async ({ queryKey }) => {
+  if (queryKey.includes('prod')) {
+    const { data } = await apiDev.get(queryKey[0] as string);
+    return data;
+  }
+
+  const { data } = await api.get(queryKey[0] as string);
+  return data;
+};
+
+const queryClient = new QueryClient({
+  defaultOptions: {
+    queries: {
+      queryFn: defaultQueryFn,
+    },
+  },
+});
+
+ReactDOM.createRoot(document.getElementById('root') as HTMLElement).render(
+  <React.StrictMode>
+    <BrowserRouter>
+      <QueryClientProvider client={queryClient}>
+        <CookiesProvider>
+          <ToastProvider>
+            <DialogProvider>
+              <App />
+            </DialogProvider>
+          </ToastProvider>
+        </CookiesProvider>
+      </QueryClientProvider>
+    </BrowserRouter>
+  </React.StrictMode>
+);

diff --git a/GUI/src/model/ruuter-response-model.ts b/GUI/src/model/ruuter-response-model.ts
new file mode 100644
index 00000000..07cafc1c
--- /dev/null
+++ b/GUI/src/model/ruuter-response-model.ts
@@ -0,0 +1,11 @@
+export interface RuuterResponse {
+  data: Record<string, unknown> | null;
+  error: string | null;
+}
+
+export interface CustomJwtExtendResponse {
+  data: {
+    custom_jwt_extend: string;
+  };
+  error: null;
+}

diff --git a/GUI/src/pages/LoadingScreen/LoadingScreen.scss b/GUI/src/pages/LoadingScreen/LoadingScreen.scss
new file mode 100644
index 00000000..c45e573a
--- /dev/null
+++ b/GUI/src/pages/LoadingScreen/LoadingScreen.scss
@@ -0,0 +1,20 @@
+/* Loader container */
+.loader {
+  position: fixed;
+  left: 50%;
+  top: 50%;
+  transform: translate(-50%, -50%);
+  border: 8px solid #f3f3f3; /* Light grey */
+  border-top: 8px solid #3498db; /* Blue */
+  border-radius: 50%;
+  width: 60px;
+  height: 60px;
+  animation: spin 1.5s linear infinite;
+}
+
+/* Spin animation */
+@keyframes spin {
+  0% { transform: rotate(0deg); }
+  100% { transform: rotate(360deg); }
+}
\ No newline at end of file

diff --git a/GUI/src/pages/LoadingScreen/LoadingScreen.tsx b/GUI/src/pages/LoadingScreen/LoadingScreen.tsx
new file mode 100644
index 00000000..3f8add92
--- /dev/null
+++ b/GUI/src/pages/LoadingScreen/LoadingScreen.tsx
@@ -0,0 +1,12 @@
+import { FC } from 'react';
+import './LoadingScreen.scss'
+
+const LoadingScreen: FC = () => {
+  return (
+    <div className="loader"></div>
    + ); +}; + +export default LoadingScreen; \ No newline at end of file diff --git a/GUI/src/pages/Unauthorized/unauthorized.scss b/GUI/src/pages/Unauthorized/unauthorized.scss new file mode 100644 index 00000000..3c1bb0ff --- /dev/null +++ b/GUI/src/pages/Unauthorized/unauthorized.scss @@ -0,0 +1,30 @@ +.unauthorized-container { + display: flex; + align-items: center; + justify-content: center; + height: 100vh; + background-color: #f0f2f5; + padding: 20px; + box-sizing: border-box; + } + + .unauthorized-card { + background-color: #fff; + padding: 40px; + border-radius: 8px; + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); + text-align: center; + max-width: 400px; + width: 100%; + } + + .unauthorized-header { + font-size: 2.5em; + color: #333; + margin-bottom: 20px; + } + + .unauthorized-message { + font-size: 1.2em; + color: #555; + } diff --git a/GUI/src/pages/Unauthorized/unauthorized.tsx b/GUI/src/pages/Unauthorized/unauthorized.tsx new file mode 100644 index 00000000..088fd80b --- /dev/null +++ b/GUI/src/pages/Unauthorized/unauthorized.tsx @@ -0,0 +1,17 @@ +import { FC } from 'react'; +import './unauthorized.scss'; +import { useTranslation } from 'react-i18next'; + +const Unauthorized: FC = () => { + const { t } = useTranslation(); + return ( +
<div className="unauthorized-container">
+      <div className="unauthorized-card">
+        <h1 className="unauthorized-header">{t('global.unAuthorized')}</h1>
+        <p className="unauthorized-message">{t('global.unAuthorizedDesc')}</p>
+      </div>
+    </div>
    + ); +}; + +export default Unauthorized; diff --git a/GUI/src/pages/UserManagement/SettingsUsers.scss b/GUI/src/pages/UserManagement/SettingsUsers.scss new file mode 100644 index 00000000..37e5f63b --- /dev/null +++ b/GUI/src/pages/UserManagement/SettingsUsers.scss @@ -0,0 +1,48 @@ +@import 'src/styles/tools/spacing'; +@import 'src/styles/tools/color'; +@import 'src/styles/settings/variables/other'; +@import 'src/styles/settings/variables/typography'; + +.multiSelect { + $self: &; + display: flex; + align-items: center; + gap: get-spacing(paldiski); + width: 100%; + &::placeholder { + color: get-color(black-coral-6); + font-size: small; + } + + &__label { + flex: 0 0 185px; + font-size: $veera-font-size-100; + line-height: 24px; + } + + + &__wrapper { + width: 390px; + flex: 1; + display: block; + flex-direction: column; + gap: 7px; + position: relative; + border: 0.15px solid get-color(black-coral-6); + border-radius: $veera-radius-s; + } +} + +.footer-button-wrapper { + display: flex; + gap: 10px; +} + +.button-wrapper { + display: flex; + gap: 10px; +} + +.error-span { + color: get-color(jasper-10); +} \ No newline at end of file diff --git a/GUI/src/pages/UserManagement/UserManagement.scss b/GUI/src/pages/UserManagement/UserManagement.scss new file mode 100644 index 00000000..969be2a6 --- /dev/null +++ b/GUI/src/pages/UserManagement/UserManagement.scss @@ -0,0 +1,28 @@ + +.button { + background-color: #007bff; + padding: 10px 20px; + border: none; + border-radius: 4px; + cursor: pointer; + font-size: 1rem; +} + +.button:hover { + background-color: #0056b3; +} + +.form-group { + margin-bottom: 20px; +} + +.table-header { + display: flex; + width: 100%; + justify-content: end; +} + +.action-button-container { + display: flex; + gap: 10px; +} diff --git a/GUI/src/pages/UserManagement/UserModal.tsx b/GUI/src/pages/UserManagement/UserModal.tsx new file mode 100644 index 00000000..a4696e6a --- /dev/null +++ b/GUI/src/pages/UserManagement/UserModal.tsx @@ -0,0 +1,298 @@ +import { useForm, Controller, useWatch } from 'react-hook-form'; +import { useTranslation } from 'react-i18next'; +import { AxiosError } from 'axios'; +import { useMutation, useQueryClient } from '@tanstack/react-query'; + +import { Button, Dialog, FormInput, Track } from 'components'; +import { User, UserDTO } from 'types/user'; +import { checkIfUserExists, createUser, editUser } from 'services/users'; +import { useToast } from 'hooks/useToast'; +import Select, { components } from 'react-select'; +import './SettingsUsers.scss'; +import { FC, useEffect, useMemo, useState } from 'react'; +import { ROLES } from 'enums/roles'; +import { userManagementQueryKeys } from 'utils/queryKeys'; +import { ButtonAppearanceTypes, ToastTypes } from 'enums/commonEnums'; +import { FaChevronDown, FaChevronUp } from 'react-icons/fa'; + +type UserModalProps = { + onClose: () => void; + user?: User; + isModalOpen?: boolean; +}; + +const DropdownIndicator = (props: any) => { + return ( + + {props.selectProps.menuIsOpen ? 
: } + + ); +}; + +const UserModal: FC = ({ onClose, user, isModalOpen }) => { + const { t } = useTranslation(); + const toast = useToast(); + const queryClient = useQueryClient(); + const [isValidIdentification, setIsValidIdentification] = + useState(false); + + const { + register, + control, + handleSubmit, + formState: { errors, isDirty }, + } = useForm({ + defaultValues: { + useridcode: user?.useridcode, + authorities: user?.authorities, + csaTitle: user?.csaTitle, + csaEmail: user?.csaEmail, + fullName: user?.firstName && user?.lastName ?`${user?.firstName} ${user?.lastName}`:"", + }, + }); + + const watchedValues = useWatch({ + control }); + + const roles = useMemo( + () => [ + { label: t('roles.ROLE_ADMINISTRATOR'), value: ROLES.ROLE_ADMINISTRATOR }, + { + label: t('roles.ROLE_MODEL_TRAINER'), + value: ROLES.ROLE_MODEL_TRAINER, + }, + ], + [t] + ); + + const userCreateMutation = useMutation({ + mutationFn: (data: UserDTO) => createUser(data), + onSuccess: async () => { + await queryClient.invalidateQueries( + userManagementQueryKeys.getAllEmployees() + ); + toast.open({ + type: ToastTypes.SUCCESS, + title: t('global.notification'), + message: t('toast.success.newUserAdded'), + }); + onClose(); + }, + onError: (error: AxiosError) => { + toast.open({ + type: ToastTypes.ERROR, + title: t('global.notificationError'), + message: error?.message ?? '', + }); + }, + }); + + const userEditMutation = useMutation({ + mutationFn: ({ + id, + userData, + }: { + id: string | number; + userData: UserDTO; + }) => editUser(id, userData), + onSuccess: async () => { + await queryClient.invalidateQueries( + userManagementQueryKeys.getAllEmployees() + ); + toast.open({ + type: ToastTypes.SUCCESS, + title: t('global.notification'), + message: t('toast.success.userUpdated'), + }); + onClose(); + }, + onError: (error: AxiosError) => { + toast.open({ + type: ToastTypes.ERROR, + title: t('global.notificationError'), + message: error?.message ?? '', + }); + }, + }); + + const checkIfUserExistsMutation = useMutation({ + mutationFn: ({ userData }: { userData: UserDTO }) => + checkIfUserExists(userData), + onSuccess: async (data) => { + if (data.response === 'true') { + setIsValidIdentification(false); + toast.open({ + type: ToastTypes.ERROR, + title: t('global.notificationError'), + message: t('userManagement.addUser.userExists'), + }); + } else { + createNewUser(); + } + }, + onError: (error: AxiosError) => { + toast.open({ + type: ToastTypes.ERROR, + title: t('global.notificationError'), + message: error?.message, + }); + }, + }); + + const createNewUser = handleSubmit((userData) => + userCreateMutation.mutate(userData) + ); + + const handleUserSubmit = handleSubmit((data) => { + if (user) userEditMutation.mutate({ id: user.useridcode, userData: data }); + else checkIfUserExistsMutation.mutate({ userData: data }); + }); + + const hasChangedFields = () => { + return ( + watchedValues.useridcode !== user?.useridcode || + watchedValues.authorities?.join(',') !== user?.authorities?.join(',') || + watchedValues !== user?.displayName || + watchedValues.csaTitle !== user?.csaTitle || + watchedValues.csaEmail !== user?.csaEmail); + }; + + return ( + + + + + } + > + + + {errors?.fullName && ( + {errors?.fullName?.message} + )} + + ( +
    + +
    + + + {options.map(opt => ( + + ))} + +
    + )} + + ); +}; + +export default DropdownFilter; \ No newline at end of file diff --git a/GUI/src/components/DataTable/index.tsx b/GUI/src/components/DataTable/index.tsx index 0c7b67f3..58317410 100644 --- a/GUI/src/components/DataTable/index.tsx +++ b/GUI/src/components/DataTable/index.tsx @@ -29,10 +29,10 @@ import { import clsx from 'clsx'; import { Link } from 'react-router-dom'; import { useTranslation } from 'react-i18next'; - import { Icon, Track } from 'components'; import Filter from './Filter'; import './DataTable.scss'; +import DropdownFilter from './DropdownFilter'; type DataTableProps = { data: any; @@ -52,6 +52,9 @@ type DataTableProps = { disableHead?: boolean; pagesCount?: number; meta?: TableMeta; + dropdownFilters?: DropdownFilterConfig[]; + onSelect?: (value: string | number) => void | undefined// Callback for dropdown filter selection + }; type ColumnMeta = { @@ -62,6 +65,11 @@ type ColumnMeta = { type CustomColumnDef = ColumnDef & ColumnMeta; +type DropdownFilterConfig = { + columnId: string; + options: { label: string; value: string | number }[]; +}; + declare module '@tanstack/table-core' { interface FilterFns { fuzzy: FilterFn; @@ -108,6 +116,8 @@ const DataTable: FC = ( disableHead, pagesCount, meta, + dropdownFilters, + onSelect }, ) => { const id = useId(); @@ -153,11 +163,11 @@ const DataTable: FC = ( {!disableHead && ( - {table.getHeaderGroups().map((headerGroup) => ( - - {headerGroup.headers.map((header) => ( - + {headerGroup.headers.map((header) => ( + - ))} - - ))} + )} + + ))} + + ))} )} - {tableBodyPrefix} - {table.getRowModel().rows.map((row) => ( - - {row.getVisibleCells().map((cell) => ( - - ))} - - ))} + {tableBodyPrefix} + {table.getRowModel().rows.map((row) => ( + + {row.getVisibleCells().map((cell) => ( + + ))} + + ))}
    - {header.isPlaceholder ? null : ( + {table.getHeaderGroups().map((headerGroup) => ( +
    + {header.isPlaceholder ? null : ( {sortable && header.column.getCanSort() && ( )} {flexRender(header.column.columnDef.header, header.getContext())} {filterable && header.column.getCanFilter() && ( - + (() => { + const dropdownConfig = dropdownFilters?.find( + (df) => df.columnId === header.column.id + ); + + if (dropdownConfig) { + return ( + { })} + /> + ); + } + return ; + })() )} - )} -
    {flexRender(cell.column.columnDef.cell, cell.getContext())}
    {flexRender(cell.column.columnDef.cell, cell.getContext())}
    {pagination && ( diff --git a/GUI/src/pages/ViewDataset/index.tsx b/GUI/src/pages/ViewDataset/index.tsx index abb673f9..c73cd009 100644 --- a/GUI/src/pages/ViewDataset/index.tsx +++ b/GUI/src/pages/ViewDataset/index.tsx @@ -14,7 +14,7 @@ import SkeletonTable from '../../components/molecules/TableSkeleton/TableSkeleto import { sampleDatasetRows } from 'data/sampleDataset'; import DynamicForm from 'components/FormElements/DynamicForm'; import { datasetQueryKeys, integratedAgenciesQueryKeys } from 'utils/queryKeys'; -import { getDatasetMetadata } from 'services/datasets'; +import { getDatasetData, getDatasetMetadata } from 'services/datasets'; import { useQuery } from '@tanstack/react-query'; import { set } from 'date-fns'; import { useDialog } from 'hooks/useDialog'; @@ -31,7 +31,6 @@ const ViewDataset = () => { const isMetadataLoading = false; // Sample data for demonstration purposes const datasets = sampleDatasetRows; - const [updatedDataset, setUpdatedDataset] = useState(datasets?.dataPayload); const [deletedRowIds, setDeletedRowIds] = useState<(string | number)[]>([]); const [searchParams] = useSearchParams(); const datasetId = searchParams.get('datasetId'); @@ -43,6 +42,12 @@ const ViewDataset = () => { queryFn: () => getDatasetMetadata(datasetId ?? 0), }); + const { data: dataset, isLoading: datasetIsLoading } = useQuery({ + queryKey: datasetQueryKeys.GET_DATA_SETS(datasetId ?? 0, 'all', 1), + queryFn: () => getDatasetData(datasetId ?? 0, 'all', 1), + }); + const [updatedDataset, setUpdatedDataset] = useState(dataset); + const { data: agencies } = useQuery({ queryKey: integratedAgenciesQueryKeys.ALL_AGENCIES_LIST(), queryFn: () => fetchAllAgencies(), @@ -125,7 +130,7 @@ const ViewDataset = () => { }); } // Update the table view as before - const payload = updatedDataset?.map((row) => + const payload = updatedDataset?.map((row: any) => row.id === selectedRow?.id ? { id: dataRow.id, @@ -139,7 +144,7 @@ const ViewDataset = () => { const deleteDataRecord = (dataRow: SelectedRowPayload) => { if (!dataRow) return; - setUpdatedDataset((prev) => prev?.filter(row => row.id !== dataRow.id)); + setUpdatedDataset((prev: { id: number; question: string; clientName: string; clientId: string }[] | undefined) => prev?.filter((row: { id: number }) => row.id !== dataRow.id)); setDeletedRowIds((prev) => [...prev, dataRow.id]); close(); }; @@ -183,7 +188,7 @@ const ViewDataset = () => {
    -
    +
    @@ -200,6 +205,19 @@ const ViewDataset = () => { columns={dataColumns as ColumnDef[]} pagination={pagination} filterable + dropdownFilters={[ + { + columnId: 'clientName', + options: agencies?.map((a: { agencyName: string; agencyId: number }) => ({ + label: a.agencyName, + value: a.agencyId, + clientId: a.agencyId, + })) ?? [], + }, + ]} + onSelect={(value) => { + console.log('Selected option:', value); + }} setPagination={(state: PaginationState) => { if ( state.pageIndex === pagination.pageIndex && @@ -208,7 +226,7 @@ const ViewDataset = () => { return; setPagination(state); }} - pagesCount={10} + pagesCount={1} isClientSide={false} /> )} diff --git a/GUI/src/services/datasets.ts b/GUI/src/services/datasets.ts index 26be1f51..6048c56e 100644 --- a/GUI/src/services/datasets.ts +++ b/GUI/src/services/datasets.ts @@ -25,4 +25,19 @@ export async function getDatasetMetadata( }, }); return data?.response?.[0] ?? []; +} + +export async function getDatasetData( + datasetId: number |string, + agencyId?: number |string, + pageNum?: number, +) { + const { data } = await apiDev.get(datasetsEndpoints.GET_DATASETS_DATA(), { + params: { + datasetId, + agencyId, + pageNum : pageNum ?? 1, + }, + }); + return data?.response?.data ?? []; } \ No newline at end of file diff --git a/GUI/src/utils/endpoints.ts b/GUI/src/utils/endpoints.ts index 0141a9dd..d37151ec 100644 --- a/GUI/src/utils/endpoints.ts +++ b/GUI/src/utils/endpoints.ts @@ -17,6 +17,8 @@ export const integratedAgenciesEndPoints = { export const datasetsEndpoints = { GET_OVERVIEW: (): string => '/global-classifier/datasets/list', GET_METADATA: (): string => `/global-classifier/datasets/metadata`, + GET_DATASETS_DATA: (): string => '/global-classifier/datasets/overview', + GET_DATASET_FILTERS: (): string => diff --git a/GUI/src/utils/queryKeys.ts b/GUI/src/utils/queryKeys.ts index 67af82b8..2fd5d4a2 100644 --- a/GUI/src/utils/queryKeys.ts +++ b/GUI/src/utils/queryKeys.ts @@ -42,11 +42,13 @@ export const datasetQueryKeys = { (val) => val !== undefined ); }, - GET_DATA_SETS: function (dgId?: number, pagination?: PaginationState) { - return ['datasets/groups/data', `${dgId}`, pagination].filter( + GET_DATA_SETS: function (datasetId?: number|string, agencyId?:number|string, pageNum?: number) { + return ['datasets/data', datasetId, agencyId,pageNum].filter( (val) => val !== undefined ); }, + + GET_DATASET_GROUP_PROGRESS: () => ['datasetgroups/progress'], }; diff --git a/GUI/translations/en/common.json b/GUI/translations/en/common.json index fb9ddd73..6ae78f0a 100644 --- a/GUI/translations/en/common.json +++ b/GUI/translations/en/common.json @@ -50,7 +50,8 @@ "sessionTimeOutDesc": "Extend your session or sign out from application in {{seconds}}", "close":"Close", "proceed":"Proceed", - "maxFileSize":"File size should not exceed 20 MB." 
+ "maxFileSize":"File size should not exceed 20 MB.", + "select": "-Select-" }, "menu": { "userManagement": "User Management", From bacc27c98da7bdee41309ce340087e559cd4e0cd Mon Sep 17 00:00:00 2001 From: Thirunayan22 Date: Tue, 24 Jun 2025 22:25:36 +0530 Subject: [PATCH 050/195] updated model training module architecture --- .../achitecture/data-pipeline-architecture.md | 0 .../first-time-agency-dataset-import-flow.md | 96 ++ ...r-dataset-pipeline-architecture -v2.drawio | 1428 ++++++++++++++++- docs/gc_model_training_module.drawio | 857 ++++++++++ 4 files changed, 2311 insertions(+), 70 deletions(-) create mode 100644 docs/achitecture/data-pipeline-architecture.md create mode 100644 docs/achitecture/first-time-agency-dataset-import-flow.md create mode 100644 docs/gc_model_training_module.drawio diff --git a/docs/achitecture/data-pipeline-architecture.md b/docs/achitecture/data-pipeline-architecture.md new file mode 100644 index 00000000..e69de29b diff --git a/docs/achitecture/first-time-agency-dataset-import-flow.md b/docs/achitecture/first-time-agency-dataset-import-flow.md new file mode 100644 index 00000000..5d8de27a --- /dev/null +++ b/docs/achitecture/first-time-agency-dataset-import-flow.md @@ -0,0 +1,96 @@ +# First-Time Agency Dataset Import Flow + +## 1. Overview + +This document describes the architectural flow for the first-time import and generation of a dataset for a new agency within the Global Classifier system. This process is initiated after an agency is recognized by the system and its data needs to be ingested from the Common Knowledge Base (CKB) and processed into a usable format for the classifier. + +The primary orchestrator of this flow is the `/globalclassifier/POST/cronmanager/agency/data/generate` service, which coordinates data retrieval, metadata management, the dataset generation process, and storage. + +## 2. Actors and Components Involved + +* **Global Classifier Cron Manager (`/globalclassifier/POST/cronmanager/agency/data/generate`)**: The central service that orchestrates the entire first-time dataset generation flow. +* **Common Knowledge Base (CKB)**: The external system and source for raw agency data. It exposes data via S3 storage. +* **CKB S3 Storage**: Stores the raw agency datasets. + * **CKB S3 Storage**: Stores the raw agency datasets. +* **Global Classifier Storage**: Internal storage (e.g., S3 bucket) used by the Global Classifier to store downloaded raw data (temporarily) and the final processed datasets. +* **Global Classifier ReSQL Database**: Stores metadata related to agencies, datasets, and their generation progress. Accessed via ReSQL endpoints. + * Key table: `GcIntegratedAgencies` (stores agency-specific information). + * Dataset metadata tables (for tracking status, location of data, etc.). +* **Dataset Generator Service (`/dataset-generator/POST/cronmanager/dataset/generate`)**: A dedicated service responsible for taking the raw agency data and transforming it into the structured dataset required by the Global Classifier. +* **Global Classifier SSE Service (`/globalclassifier/SSE/dataset-generation-progress`)**: Provides Server-Sent Events for real-time monitoring of the dataset generation progress, typically consumed by a UI. +* **Integrate Agencies Main Interface**: A user interface or system entry point for managing agency integrations, which may lead to the initiation of this flow. + +## 3. 
+
+## 3. Detailed Flow Steps
+
+The following steps outline the process for a first-time dataset import and generation for a specific agency:
+
+### 3.1. Initiation
+
+1. **Trigger**: The `/globalclassifier/POST/cronmanager/agency/data/generate` cron job is initiated for a specific agency.
+   * *(Assumption: This is triggered for an agency that has been newly onboarded or marked as requiring its first dataset generation, likely following an action from the "Integrate Agencies Main Interface" or an automated detection of a new agency requiring data.)*
+
+### 3.2. Data Retrieval from CKB
+
+2. **Obtain Secure Data URL**: The Cron Manager service interacts with CKB to obtain a pre-signed S3 URL for the specific agency's raw dataset. This URL provides secure, temporary access to the data.
+3. **Download Raw Data**: Using the obtained pre-signed S3 URL, the Cron Manager service downloads the raw agency data from the CKB S3 storage into the Global Classifier's environment.
+
+### 3.3. Data Storage (Raw Data)
+
+4. **Store Raw Data Temporarily**: The downloaded raw agency data is uploaded to a temporary location within the Global Classifier's internal storage. This makes the data accessible for the subsequent generation process.
+
+### 3.4. Metadata Management (Initial)
+
+5. **Create Initial Dataset Metadata**: The Cron Manager service calls a ReSQL endpoint (e.g., `/globalclassifier/resql/add-dataset-metadata`) to create an initial metadata record for this new dataset. This record typically includes:
+   * A unique dataset ID.
+   * Link to the `agency_id`.
+   * Initial status (e.g., "Pending Generation", "Data Retrieved").
+   * Timestamp of creation.
+   * Reference to the location of the raw data in Global Classifier storage.
+
+### 3.5. Dataset Generation
+
+6. **Invoke Dataset Generator**: The Cron Manager service triggers the `/dataset-generator/POST/cronmanager/dataset/generate` service. It passes necessary information, such as the location of the raw data in GC storage and the dataset metadata ID.
+7. **Data Processing**: The Dataset Generator service performs the core data transformation:
+   * Reads the raw data.
+   * Cleans, preprocesses, and transforms the data into the required schema and format for the Global Classifier.
+   * During this process, it periodically updates the dataset's metadata record in the ReSQL database (e.g., via `/globalclassifier/resql/update-dataset-generation-progress`) with the current status (e.g., "Sync in progress with CKB" or "Sync completed").
+   * It also publishes these progress updates to the `/globalclassifier/SSE/dataset-generation-progress` endpoint, allowing the dataset generator UI and other services to monitor in real-time.
+
+### 3.6. Data Storage (Processed Dataset)
+
+8. **Store Generated Dataset**: Upon successful completion of the generation process, the Dataset Generator service uploads the final, processed dataset to a designated location in the Global Classifier's internal storage.
+
+### 3.7. Metadata Management (Final)
+
+9. **Finalize Dataset Metadata**: The dataset's metadata record in the ReSQL database is updated to reflect the completion of the generation process, as sketched below.
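+
+The exact ReSQL contract is not fixed by this document; a minimal sketch of such an update call follows (the endpoint path is taken from the steps above, while the body field names are assumptions):
+
+```typescript
+// Hypothetical progress/status update issued against the ReSQL endpoint.
+async function updateGenerationProgress(datasetId: string, status: string): Promise<void> {
+  await fetch('/globalclassifier/resql/update-dataset-generation-progress', {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    // Field names are assumed; the real schema lives in the ReSQL definition.
+    body: JSON.stringify({ datasetId, generationStatus: status }),
+  });
+}
+```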
+
+### 3.8. Cleanup
+
+10. **Delete Temporary Raw Data**: After the generated dataset is successfully stored and metadata is updated, the Cron Manager service deletes the temporary copy of the raw agency data that was downloaded from CKB and stored in the Global Classifier's environment.
+    * *(Assumption: This step refers to deleting the local copy within the Global Classifier's system, not the original data in CKB S3 storage.)*
+
+## 4. Progress Monitoring
+
+* The status of the dataset generation can be tracked by querying the dataset metadata via ReSQL.
+* Real-time progress updates are available by subscribing to the `/globalclassifier/SSE/dataset-generation-progress` Server-Sent Events stream; a short consumer sketch is given at the end of this document.
+
+## 5. Key API Endpoints and Services Involved
+
+* **Orchestration**:
+  * `POST /globalclassifier/cronmanager/agency/data/generate`: Initiates and manages the first-time dataset generation for an agency.
+* **Data Source Interaction (CKB - Conceptual)**:
+  * Interaction to get a signed S3 URL for agency data (e.g., an internal CKB API or library).
+  * `GET /ckb/GET/agency/data/exists`: (Potentially a preliminary check before initiating the main flow) Checks if data for an agency exists in CKB.
+* **Dataset Generation**:
+  * `POST /dataset-generator/cronmanager/dataset/generate`: Triggers the actual processing of raw data into a usable dataset.
+* **Metadata Management (ReSQL)**:
+  * `POST /globalclassifier/resql/add-dataset-metadata`: Creates initial metadata for the dataset.
+  * `POST /globalclassifier/resql/update-dataset-generation-progress` (or similar): Updates the status and progress of dataset generation.
+* **Progress Monitoring (SSE)**:
+  * `GET /globalclassifier/SSE/dataset-generation-progress`: Provides a stream of real-time updates on generation progress.
+* **Agency Information (ReSQL/API)**:
+  * `GET /globalclassifier/GET/agencies`: Lists integrated agencies.
+  * `GET /globalclassifier/resql/get-integrated-agency`: Retrieves details for a specific integrated agency.
+
+This flow ensures that data from new agencies is systematically imported, processed, and made available for the Global Classifier, with appropriate metadata tracking and progress visibility.
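+
+For illustration, a monitoring client could consume the progress stream roughly as follows (a minimal sketch: the endpoint path comes from this document, while the payload fields and terminal status values are assumptions):
+
+```typescript
+// Hypothetical SSE consumer for dataset generation progress.
+const source = new EventSource('/globalclassifier/SSE/dataset-generation-progress');
+
+source.onmessage = (event: MessageEvent) => {
+  const progress = JSON.parse(event.data); // assumed shape: { datasetId, status }
+  console.log(`Dataset ${progress.datasetId}: ${progress.status}`);
+  if (progress.status === 'Generation_Success' || progress.status === 'Generation_Failed') {
+    source.close(); // stop listening once a terminal state is reached
+  }
+};
+```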
diff --git a/docs/achitecture/global-classifier-dataset-pipeline-architecture -v2.drawio b/docs/achitecture/global-classifier-dataset-pipeline-architecture -v2.drawio
index ac99cd7a..2c98af81 100644
--- a/docs/achitecture/global-classifier-dataset-pipeline-architecture -v2.drawio
+++ b/docs/achitecture/global-classifier-dataset-pipeline-architecture -v2.drawio
@@ -1,27 +1,30 @@
[draw.io XML diff omitted: the markup was stripped during extraction, leaving no recoverable diagram content]
diff --git a/docs/gc_model_training_module.drawio b/docs/gc_model_training_module.drawio
new file mode 100644
index 00000000..ae2405e6
--- /dev/null
+++ b/docs/gc_model_training_module.drawio
@@ -0,0 +1,857 @@
[draw.io XML content omitted: markup stripped during extraction]

From 507e2217c8e6761d783de0e13a90b9a8fc69386c Mon Sep 17 00:00:00 2001
From: Thirunayan22
Date: Tue, 24 Jun 2025 23:29:10 +0530
Subject: [PATCH 051/195] updated architecture for model training

---
 docs/gc_model_training_module.drawio | 29 +++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/docs/gc_model_training_module.drawio b/docs/gc_model_training_module.drawio
index ae2405e6..7effc396 100644
--- a/docs/gc_model_training_module.drawio
+++ b/docs/gc_model_training_module.drawio
@@ -1,6 +1,6 @@
[draw.io XML diff omitted: markup stripped during extraction]

From a1e8f137e3a58de99c7f1ca718dc5d8b176a2c92 Mon Sep 17 00:00:00 2001
From: erangi-ar
Date: Wed, 25 Jun 2025 11:00:56 +0530
Subject: [PATCH 052/195] feat: chunk retrieval and metadata insertion.
--- .../hbs/get_paginated_chunk_ids.handlebars | 1 + DSL/DMapper/global-classifier/lib/helpers.js | 74 +++++++++++-------- ...lassifier-script-v10-datasets-metadata.sql | 3 +- .../POST/get-chunks-and-agencies.sql | 4 + .../POST/insert-metadata.sql | 11 +++ .../GET/datasets/overview.yml | 38 +++++++--- 6 files changed, 88 insertions(+), 43 deletions(-) create mode 100644 DSL/DMapper/global-classifier/hbs/get_paginated_chunk_ids.handlebars create mode 100644 DSL/Resql/global-classifier/POST/get-chunks-and-agencies.sql create mode 100644 DSL/Resql/global-classifier/POST/insert-metadata.sql diff --git a/DSL/DMapper/global-classifier/hbs/get_paginated_chunk_ids.handlebars b/DSL/DMapper/global-classifier/hbs/get_paginated_chunk_ids.handlebars new file mode 100644 index 00000000..d4f63d0f --- /dev/null +++ b/DSL/DMapper/global-classifier/hbs/get_paginated_chunk_ids.handlebars @@ -0,0 +1 @@ +{{{getPaginatedChunkIds chunks agencyId pageNum 5}}} \ No newline at end of file diff --git a/DSL/DMapper/global-classifier/lib/helpers.js b/DSL/DMapper/global-classifier/lib/helpers.js index 5fbc0721..d55169d5 100644 --- a/DSL/DMapper/global-classifier/lib/helpers.js +++ b/DSL/DMapper/global-classifier/lib/helpers.js @@ -203,35 +203,7 @@ export function extractNewAgencies(gcAgencies, centopsAgencies) { * @returns {Object} Parsed JSON content of the file */ export function getAllChunksFromS3(datasetId, pageNum) { -// const s3Path = `/datasets/${datasetId}.json`; -// const s3FerryUrl = "http://gc-s3-ferry:3000"; -// const localDir = `/tmp/datasets/${datasetId}`; -// const fileName = path.basename(s3Path); -// const localPath = path.join(localDir, fileName); - -// // Request S3 Ferry to transfer the file from S3 to local FS -// const res = await fetch(s3FerryUrl, { -// method: "POST", -// headers: { "Content-Type": "application/json" }, -// body: JSON.stringify({ -// source_path: s3Path, -// source_type: "S3", -// destination_path: localPath, -// destination_type: "FS" -// }) -// }); - -// if (!res.ok) { -// throw new Error(`S3 Ferry transfer failed: ${res.status} ${await res.text()}`); -// } -// // Read and parse the JSON file -// const fileContent = await fs.readFile(localPath, "utf-8"); -// const data = JSON.parse(fileContent); - -// // Optionally clean up -// await fs.unlink(localPath); - - // return data; + return JSON.stringify({ data: [ { id: 1, question: "How do I renew my passport?", clientName: "Tax Department", clientId: "12" }, @@ -243,3 +215,47 @@ export function getAllChunksFromS3(datasetId, pageNum) { }); } +export function getPaginatedChunkIds(chunks, agencyId, pageNum, pageSize = 5) { + let agencyRecordIndex = 0; // total agency records seen so far + let collected = 0; // agency records collected for this page + let resultChunks = []; + let startIndex = 0; + let foundPage = false; + + for (const chunk of chunks) { + // Robustly parse included_agencies + let agencies=JSON.parse(chunk.includedAgencies.value) + + + const count = agencies.filter(a => String(a) === String(agencyId)).length; + if (count === 0) continue; + + // If we haven't reached the start of this page, skip these records + if (!foundPage && agencyRecordIndex + count < (pageNum - 1) * pageSize + 1) { + agencyRecordIndex += count; + continue; + } + + // If this is the first chunk of the page, calculate startIndex + if (!foundPage) { + startIndex = (pageNum - 1) * pageSize - agencyRecordIndex; + foundPage = true; + } + + resultChunks.push(chunk.chunkId || chunk.chunkId); + collected += count; + + // If we've collected enough, stop + 
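// `collected` counts matches of the requested agency across the chunks
+    // gathered so far, so the page is complete once it reaches `pageSize`
+    // (5 by default, matching the handlebars template earlier in this commit).
+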
if (collected >= pageSize) break; + + agencyRecordIndex += count; + } + + return JSON.stringify( + { + chunks: resultChunks, + startIndex: startIndex + } + ); +} + diff --git a/DSL/Liquibase/changelog/global-classifier-script-v10-datasets-metadata.sql b/DSL/Liquibase/changelog/global-classifier-script-v10-datasets-metadata.sql index d0b9b2ab..9804c3b4 100644 --- a/DSL/Liquibase/changelog/global-classifier-script-v10-datasets-metadata.sql +++ b/DSL/Liquibase/changelog/global-classifier-script-v10-datasets-metadata.sql @@ -5,8 +5,7 @@ CREATE TABLE public.dataset_metadata ( id BIGSERIAL PRIMARY KEY, dataset_id VARCHAR(255) NOT NULL, chunk_id VARCHAR(255) NOT NULL, - included_agencies JSONB NOT NULL, -- Example: ["agency1", "agency2"] - row_ids JSONB NOT NULL, -- Example: [101, 102, 103] + included_agencies JSONB NOT NULL, created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP ); diff --git a/DSL/Resql/global-classifier/POST/get-chunks-and-agencies.sql b/DSL/Resql/global-classifier/POST/get-chunks-and-agencies.sql new file mode 100644 index 00000000..038c8edd --- /dev/null +++ b/DSL/Resql/global-classifier/POST/get-chunks-and-agencies.sql @@ -0,0 +1,4 @@ +SELECT chunk_id, included_agencies +FROM public.dataset_metadata +WHERE dataset_id = :datasetId +ORDER BY chunk_id::int \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/insert-metadata.sql b/DSL/Resql/global-classifier/POST/insert-metadata.sql new file mode 100644 index 00000000..769d0a56 --- /dev/null +++ b/DSL/Resql/global-classifier/POST/insert-metadata.sql @@ -0,0 +1,11 @@ +INSERT INTO public.dataset_metadata ( + dataset_id, + chunk_id, + included_agencies, + created_at +) VALUES ( + :datasetId, + :chunkId, + :includedAgencies::jsonb, + NOW() +) \ No newline at end of file diff --git a/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml b/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml index e680a646..23e2c1c4 100644 --- a/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml +++ b/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml @@ -28,16 +28,7 @@ checkFilter: switch: - condition: ${agencyId === "all"} next: getAllChunks - next: getChunkIdsByAgency - -getChunkIdsByAgency: - call: http.post - args: - url: "[#GLOBAL_CLASSIFIER_RESQL]/get-chunk-ids-by-agency" - body: - agencyId: ${agencyId} - result: chunkIdsResult - next: return_result + next: getChunksAndAgencies getAllChunks: call: http.post @@ -51,11 +42,34 @@ getAllChunks: result: dataChunksResult next: return_chunk_data +getChunksAndAgencies: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-chunks-and-agencies" + body: + datasetId: ${datasetId} + result: chunksResult + next: getPaginatedChunkIds + +getPaginatedChunkIds: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_DMAPPER]/hbs/global-classifier/get_paginated_chunk_ids" + headers: + type: json + body: + chunks: ${chunksResult.response.body} + agencyId: ${agencyId} + pageNum: ${pageNum} + result: paginatedChunksResult + next: return_paginated_result + return_chunk_data: return: ${dataChunksResult.response.body} status: 200 next: end -return_result: - return: ${chunkIdsResult.response.body} +return_paginated_result: + return: ${paginatedChunksResult.response.body} + status: 200 next: end \ No newline at end of file From 2f8e8acd7602a856123087fa5b805ec5c7edbb22 Mon Sep 17 00:00:00 2001 From: Thirunayan22 Date: Wed, 25 Jun 2025 23:56:23 +0530 Subject: [PATCH 053/195] updated queueing method architcture --- 
 docs/gc_model_training_module.drawio | 756 ++++++++++++++++++---------
 1 file changed, 501 insertions(+), 255 deletions(-)

diff --git a/docs/gc_model_training_module.drawio b/docs/gc_model_training_module.drawio
index 7effc396..963cd8a4 100644
--- a/docs/gc_model_training_module.drawio
+++ b/docs/gc_model_training_module.drawio
@@ -1,47 +1,44 @@
[draw.io XML diff omitted: markup stripped during extraction]

From a38e46d16e2e3ea370413ec9d585b0c9f29dcf68 Mon Sep 17 00:00:00 2001
From: nuwangeek
Date: Thu, 26 Jun 2025 01:27:54 +0530
Subject: [PATCH 054/195] starting integrating

---
 .../DSL/fetch_chunk_without_filter.yml        |   5 +
 DSL/CronManager/DSL/fetch_multi_chunk.yml     |   5 +
 DSL/CronManager/DSL/test_pipeline.yml         |   5 +
 DSL/CronManager/script/callback_format.sh     |  80 ++-
 DSL/CronManager/script/callback_format_v1.sh  | 101 ++++
 DSL/CronManager/script/dataset_pipeline_s3.sh | 107 ++--
 DSL/CronManager/script/fetch_multi_chunk.sh   | 120 ++++
 DSL/CronManager/script/fetch_single_chunk.sh  |  95 +++
 DSL/CronManager/script/python_test_script.sh  |  19 +
 DSL/DatasetGenerator/config/config.yaml       |  40 +-
 .../prompts/institute_topic_question.txt      |  56 +-
 .../global-classifier/POST/data/generate.yml  |   2 -
 .../global-classifier/POST/data/test.yml      |  69 +++
 docker-compose.yml                            |  95 +--
 minio_presigned_urls.txt                      |  10 +
 minio_signed_urls.py                          |  65 ++
 .../Dockerfile                                |  16 +-
 src/dataset_file_hanlder/__init__.py          |   0
 .../chunks_handler_api.py                     |  93 +++
 src/dataset_file_hanlder/config/__init__.py   |   0
 src/dataset_file_hanlder/config/settings.py   |  27 +
 .../download_source_dataset.py                | 116 ++++
 .../fetch_chunk_without_filter.py             | 189 ++++++
 src/dataset_file_hanlder/fetch_multi_chunk.py | 318 ++++++++++
 src/dataset_file_hanlder/handlers/__init__.py |   0
 .../handlers/response_handler.py              |  41 ++
 src/dataset_file_hanlder/models/__init__.py   |   0
src/dataset_file_hanlder/models/schemas.py | 69 +++ .../multiple_chunk_handler.py | 234 +++++++ .../requirements.txt | 5 +- src/dataset_file_hanlder/services/__init__.py | 0 .../services/download_service.py | 114 ++++ .../services/extraction_service.py | 138 +++++ .../services/s3_ferry_service.py | 147 +++++ .../services/url_decoder_service.py | 38 ++ .../single_chunk_handler.py | 118 ++++ .../dataset_generation_callback_processor.py | 571 ++++++++++++++++++ ...ataset_generation_callback_processor_v1.py | 201 ++++++ .../download_source_dataset.py | 116 ++++ .../fetch_chunk_without_filter.py | 189 ++++++ src/s3_dataset_processor/fetch_multi_chunk.py | 318 ++++++++++ src/s3_dataset_processor/models/schemas.py | 36 +- src/s3_dataset_processor/s3_processor_api.py | 269 --------- .../services/download_service.py | 19 +- .../services/extraction_service.py | 18 +- .../services/s3_ferry_service.py | 147 +++++ 46 files changed, 3998 insertions(+), 423 deletions(-) create mode 100644 DSL/CronManager/DSL/fetch_chunk_without_filter.yml create mode 100644 DSL/CronManager/DSL/fetch_multi_chunk.yml create mode 100644 DSL/CronManager/DSL/test_pipeline.yml create mode 100644 DSL/CronManager/script/callback_format_v1.sh create mode 100644 DSL/CronManager/script/fetch_multi_chunk.sh create mode 100644 DSL/CronManager/script/fetch_single_chunk.sh create mode 100644 DSL/CronManager/script/python_test_script.sh create mode 100644 DSL/Ruuter.public/global-classifier/POST/data/test.yml create mode 100644 minio_presigned_urls.txt create mode 100644 minio_signed_urls.py rename src/{s3_dataset_processor => dataset_file_hanlder}/Dockerfile (60%) create mode 100644 src/dataset_file_hanlder/__init__.py create mode 100644 src/dataset_file_hanlder/chunks_handler_api.py create mode 100644 src/dataset_file_hanlder/config/__init__.py create mode 100644 src/dataset_file_hanlder/config/settings.py create mode 100644 src/dataset_file_hanlder/download_source_dataset.py create mode 100644 src/dataset_file_hanlder/fetch_chunk_without_filter.py create mode 100644 src/dataset_file_hanlder/fetch_multi_chunk.py create mode 100644 src/dataset_file_hanlder/handlers/__init__.py create mode 100644 src/dataset_file_hanlder/handlers/response_handler.py create mode 100644 src/dataset_file_hanlder/models/__init__.py create mode 100644 src/dataset_file_hanlder/models/schemas.py create mode 100644 src/dataset_file_hanlder/multiple_chunk_handler.py rename src/{s3_dataset_processor => dataset_file_hanlder}/requirements.txt (80%) create mode 100644 src/dataset_file_hanlder/services/__init__.py create mode 100644 src/dataset_file_hanlder/services/download_service.py create mode 100644 src/dataset_file_hanlder/services/extraction_service.py create mode 100644 src/dataset_file_hanlder/services/s3_ferry_service.py create mode 100644 src/dataset_file_hanlder/services/url_decoder_service.py create mode 100644 src/dataset_file_hanlder/single_chunk_handler.py create mode 100644 src/s3_dataset_processor/dataset_generation_callback_processor.py create mode 100644 src/s3_dataset_processor/dataset_generation_callback_processor_v1.py create mode 100644 src/s3_dataset_processor/download_source_dataset.py create mode 100644 src/s3_dataset_processor/fetch_chunk_without_filter.py create mode 100644 src/s3_dataset_processor/fetch_multi_chunk.py delete mode 100644 src/s3_dataset_processor/s3_processor_api.py create mode 100644 src/s3_dataset_processor/services/s3_ferry_service.py diff --git a/DSL/CronManager/DSL/fetch_chunk_without_filter.yml 
b/DSL/CronManager/DSL/fetch_chunk_without_filter.yml new file mode 100644 index 00000000..6f12fb8e --- /dev/null +++ b/DSL/CronManager/DSL/fetch_chunk_without_filter.yml @@ -0,0 +1,5 @@ +fetch_single_chunk: + trigger: off + type: exec + command: "../app/scripts/fetch_single_chunk.sh" + allowedEnvs: ['datasetId', 'pageNum'] \ No newline at end of file diff --git a/DSL/CronManager/DSL/fetch_multi_chunk.yml b/DSL/CronManager/DSL/fetch_multi_chunk.yml new file mode 100644 index 00000000..f52a735e --- /dev/null +++ b/DSL/CronManager/DSL/fetch_multi_chunk.yml @@ -0,0 +1,5 @@ +multi_chunk: + trigger: off + type: exec + command: "../app/scripts/fetch_multi_chunk.sh" + allowedEnvs: ['datasetId', 'chunkIds'] \ No newline at end of file diff --git a/DSL/CronManager/DSL/test_pipeline.yml b/DSL/CronManager/DSL/test_pipeline.yml new file mode 100644 index 00000000..69b08e98 --- /dev/null +++ b/DSL/CronManager/DSL/test_pipeline.yml @@ -0,0 +1,5 @@ +test_job: + trigger: off + type: exec + command: "../app/scripts/python_test_script.sh" + allowedEnvs: ['testParam', 'delaySeconds'] \ No newline at end of file diff --git a/DSL/CronManager/script/callback_format.sh b/DSL/CronManager/script/callback_format.sh index 2022bb65..f8d07dc1 100644 --- a/DSL/CronManager/script/callback_format.sh +++ b/DSL/CronManager/script/callback_format.sh @@ -12,6 +12,17 @@ fi log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" } +# Debug: Check Python environment +log "🔍 Python version: $(python3 --version)" +log "🔍 Python path: $(which python3)" + +# Install required packages +log "🔍 Installing required Python packages..." +python3 -m pip install --quiet --no-cache-dir requests pydantic || { + log "❌ Failed to install packages" + exit 1 +} +log "✅ Required packages installed" log "Dataset generation callback processing started" log "File path: $filePath" @@ -21,29 +32,39 @@ log "Encoded results length: ${#results} characters" dataset_id=$(echo "$filePath" | grep -o '/[^/]*\.json$' | sed 's|/\([^/]*\)\.json$|\1|' || echo "unknown") log "Extracted dataset ID: $dataset_id" -# API endpoint for processing generation callback -API_URL="http://s3-dataset-processor:8001/process-generation-callback" +# Direct Python script path for processing generation callback (inside container) +CALLBACK_SCRIPT="/app/src/s3_dataset_processor/dataset_generation_callback_processor.py" -log "🔍 Calling S3 Dataset Processor API to process generation callback..." +log "🔍 Calling direct Python script to process generation callback..." -# Call the API to process generation callback (background processing) -response=$(curl -s -o /tmp/callback_response_body.txt -w "%{http_code}" -X POST "$API_URL" \ - -H "Content-Type: application/json" \ - -d "{\"file_path\":\"$filePath\", \"results\":\"$results\"}") +# Create temporary file for response +temp_response="/tmp/callback_response.json" -http_code="$response" -response_body=$(cat /tmp/callback_response_body.txt) +# Call the direct Python script instead of API endpoint +python3 "$CALLBACK_SCRIPT" \ + --file-path "$filePath" \ + --encoded-results "$results" \ + --output-json "$temp_response" -log "🔍 HTTP Status Code: $http_code" -log "🔍 Response Body: $response_body" +exit_code=$? +log "🔍 Python script exit code: $exit_code" + +if [ -f "$temp_response" ]; then + log "📄 Contents of output JSON:" + cat "$temp_response" +else + log "⚠️ No output JSON file was generated." 
+fi -# Check if API call was successful (should get 200 immediately) -if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then - log "✅ Callback processing request accepted successfully" +# Check if script execution was successful +if [ "$exit_code" -eq 0 ] && [ -f "$temp_response" ]; then + log "✅ Python script execution successful" + + response_body=$(cat "$temp_response") + log "🔍 Response: $response_body" # Parse the response to get status information if command -v jq >/dev/null 2>&1; then - # Use jq if available status=$(echo "$response_body" | jq -r '.status // "unknown"') message=$(echo "$response_body" | jq -r '.message // "unknown"') @@ -65,37 +86,32 @@ if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then log " - Dataset ID: $dataset_id" fi - # Check if callback processing was accepted - if [ "$status" = "accepted" ]; then - log "✅ Dataset generation callback submitted for background processing" - log "🔄 Background task will create the following payload structure:" + # Check if callback processing was completed + if [ "$status" = "completed" ]; then + log "✅ Dataset generation callback processed successfully" + log "🔄 Callback payload has been sent to status update endpoint" log " - agencies: [{agencyId: X, syncStatus: Synced_with_CKB/Sync_with_CKB_Failed}, ...]" log " - datasetId: $dataset_id" log " - generationStatus: Generation_Success/Generation_Failed" - log "📋 Note: Actual callback processing is happening in the background" - log "📋 Check the S3 processor service logs for detailed processing results" - else log "⚠️ Unexpected status received: $status" log "⚠️ Message: $message" fi -else - log "❌ Callback processing request failed" - log "HTTP Status: $http_code" - log "Response: $response_body" + # Cleanup temp file + rm -f "$temp_response" - # Clean up temp files - rm -f /tmp/callback_response_body.txt +else + log "❌ Python script execution failed with exit code: $exit_code" + if [ -f "$temp_response" ]; then + log "Error response: $(cat $temp_response)" + rm -f "$temp_response" + fi exit 1 fi -# Clean up temp files -rm -f /tmp/callback_response_body.txt - log "✅ Dataset generation callback processing completed successfully" log "📋 Summary: Dataset ID: $dataset_id, Request Status: $status" -log "📋 Background processing will generate the final callback payload" exit 0 \ No newline at end of file diff --git a/DSL/CronManager/script/callback_format_v1.sh b/DSL/CronManager/script/callback_format_v1.sh new file mode 100644 index 00000000..2022bb65 --- /dev/null +++ b/DSL/CronManager/script/callback_format_v1.sh @@ -0,0 +1,101 @@ +#!/bin/bash + +echo "Started Shell Script for Dataset Generation Callback Processing" + +# Check if environment variables are set +if [ -z "$filePath" ] || [ -z "$results" ]; then + echo "Please set the filePath and results environment variables." + exit 1 +fi + +# Logging function +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" +} + +log "Dataset generation callback processing started" +log "File path: $filePath" +log "Encoded results length: ${#results} characters" + +# Extract dataset ID from file path for logging +dataset_id=$(echo "$filePath" | grep -o '/[^/]*\.json$' | sed 's|/\([^/]*\)\.json$|\1|' || echo "unknown") +log "Extracted dataset ID: $dataset_id" + +# API endpoint for processing generation callback +API_URL="http://s3-dataset-processor:8001/process-generation-callback" + +log "🔍 Calling S3 Dataset Processor API to process generation callback..." 
+ +# Call the API to process generation callback (background processing) +response=$(curl -s -o /tmp/callback_response_body.txt -w "%{http_code}" -X POST "$API_URL" \ + -H "Content-Type: application/json" \ + -d "{\"file_path\":\"$filePath\", \"results\":\"$results\"}") + +http_code="$response" +response_body=$(cat /tmp/callback_response_body.txt) + +log "🔍 HTTP Status Code: $http_code" +log "🔍 Response Body: $response_body" + +# Check if API call was successful (should get 200 immediately) +if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then + log "✅ Callback processing request accepted successfully" + + # Parse the response to get status information + if command -v jq >/dev/null 2>&1; then + # Use jq if available + status=$(echo "$response_body" | jq -r '.status // "unknown"') + message=$(echo "$response_body" | jq -r '.message // "unknown"') + + log "📊 Callback Processing Status:" + log " - Status: $status" + log " - Message: $message" + log " - Dataset ID: $dataset_id" + + else + # Fallback parsing without jq + log "⚠️ jq not available, using grep/sed for parsing" + + status=$(echo "$response_body" | grep -o '"status":"[^"]*"' | sed 's/.*"status":"\([^"]*\)".*/\1/' || echo "unknown") + message=$(echo "$response_body" | grep -o '"message":"[^"]*"' | sed 's/.*"message":"\([^"]*\)".*/\1/' || echo "unknown") + + log "📊 Callback Processing Status:" + log " - Status: $status" + log " - Message: $message" + log " - Dataset ID: $dataset_id" + fi + + # Check if callback processing was accepted + if [ "$status" = "accepted" ]; then + log "✅ Dataset generation callback submitted for background processing" + log "🔄 Background task will create the following payload structure:" + log " - agencies: [{agencyId: X, syncStatus: Synced_with_CKB/Sync_with_CKB_Failed}, ...]" + log " - datasetId: $dataset_id" + log " - generationStatus: Generation_Success/Generation_Failed" + + log "📋 Note: Actual callback processing is happening in the background" + log "📋 Check the S3 processor service logs for detailed processing results" + + else + log "⚠️ Unexpected status received: $status" + log "⚠️ Message: $message" + fi + +else + log "❌ Callback processing request failed" + log "HTTP Status: $http_code" + log "Response: $response_body" + + # Clean up temp files + rm -f /tmp/callback_response_body.txt + exit 1 +fi + +# Clean up temp files +rm -f /tmp/callback_response_body.txt + +log "✅ Dataset generation callback processing completed successfully" +log "📋 Summary: Dataset ID: $dataset_id, Request Status: $status" +log "📋 Background processing will generate the final callback payload" + +exit 0 \ No newline at end of file diff --git a/DSL/CronManager/script/dataset_pipeline_s3.sh b/DSL/CronManager/script/dataset_pipeline_s3.sh index 939213d4..0d381b59 100644 --- a/DSL/CronManager/script/dataset_pipeline_s3.sh +++ b/DSL/CronManager/script/dataset_pipeline_s3.sh @@ -3,8 +3,8 @@ echo "Started Shell Script for S3 DataSet Processing" # Check if environment variable is set -if [ -z "$signedUrls" || [ -z "$datasetId" ]]; then - echo "Please set the signedUrls environment variable." +if [ -z "$signedUrls" ] || [ -z "$datasetId" ]; then + echo "Please set the signedUrls and datasetId environment variables." exit 1 fi @@ -15,33 +15,59 @@ log() { data_generation_request="$signedUrls" +# Install required Python packages if not present +echo "🔍 Installing required Python packages..." 
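+# Note: installing at runtime keeps the cron-manager image unchanged but adds
+# startup latency on every run; baking requests/pydantic into the image is an
+# alternative (assumption: the base image does not already ship them).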
+python3 -m pip install --quiet --no-cache-dir requests pydantic || { + echo "❌ Failed to install packages" + exit 1 +} +echo "✅ Required packages installed" + log "S3 data processing request received" log "Encoded data length: ${#data_generation_request} characters" -# API endpoint for downloading datasets -API_URL="http://s3-dataset-processor:8001/download-datasets" +# Direct Python script path for downloading datasets (inside container) +DOWNLOAD_SCRIPT="/app/src/s3_dataset_processor/download_source_dataset.py" CURRENT_DATASET_ID="$datasetId" CURRENT_DATASET_ID=$(echo "$CURRENT_DATASET_ID" | tr -d '"') -log "🔍 Calling S3 Dataset Processor API to download files..." +log "🔍 Calling direct Python script to download files..." -# Call the API to download datasets with safe response parsing -response=$(curl -s -o /tmp/response_body.txt -w "%{http_code}" -X POST "$API_URL" \ - -H "Content-Type: application/json" \ - -d "{\"encoded_data\":\"$data_generation_request\", \"extract_files\": true}") +# Create temporary file for response +temp_response="/tmp/download_response.json" -http_code="$response" -response_body=$(cat /tmp/response_body.txt) +# Call the direct Python script instead of FastAPI +python3 "$DOWNLOAD_SCRIPT" \ + --encoded-data "$data_generation_request" \ + --extract-files \ + --output-json "$temp_response" -log "🔍 HTTP Status Code: $http_code" -log "🔍 Raw Response Body: $response_body" +exit_code=$? +log "🔍 Python script exit code: $exit_code" -# Check if API call was successful -if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then - log "✅ API call successful" +if [ -f "$temp_response" ]; then + log "📄 Contents of output JSON:" + cat "$temp_response" +else + log "⚠️ No output JSON file was generated." +fi + +# Check if script execution was successful +if [ "$exit_code" -eq 0 ] && [ -f "$temp_response" ]; then + log "✅ Python script execution successful" + + response_body=$(cat "$temp_response") + log "🔍 Response: $response_body" - # Check if the response indicates success using grep instead of jq - if printf "%s\n" "$response_body" | grep -q '"success":true'; then + # Improved JSON parsing - remove whitespace and check for success + # Use multiple methods to ensure we catch the success field + success_check1=$(echo "$response_body" | grep -o '"success"[[:space:]]*:[[:space:]]*true' | wc -l) + success_check2=$(echo "$response_body" | grep -o '"success":true' | wc -l) + success_check3=$(echo "$response_body" | tr -d ' \n\r\t' | grep -o '"success":true' | wc -l) + + log "🔍 Success check results: method1=$success_check1, method2=$success_check2, method3=$success_check3" + + if [ "$success_check1" -gt 0 ] || [ "$success_check2" -gt 0 ] || [ "$success_check3" -gt 0 ]; then success_status="true" else success_status="false" @@ -52,8 +78,9 @@ if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then if [ "$success_status" = "true" ]; then log "✅ S3 download and extraction successful" - # Get successful downloads count using grep and sed - successful_downloads=$(printf "%s\n" "$response_body" | grep -o '"successful_downloads":[0-9]*' | grep -o '[0-9]*' || echo "0") + # Get successful downloads count using improved parsing + successful_downloads=$(echo "$response_body" | grep -o '"successful_downloads"[[:space:]]*:[[:space:]]*[0-9]*' | grep -o '[0-9]*' | tail -1) + [ -z "$successful_downloads" ] && successful_downloads=0 log "Successfully downloaded and extracted $successful_downloads files" # Prepare dataset generation payload as a list @@ -66,15 +93,17 @@ if [ "$http_code" = 
"200" ] && [ -n "$response_body" ]; then first_entry=true # Extract folder information and build the payload list + # Use improved parsing to handle the extracted_folders array if command -v jq >/dev/null 2>&1; then - # Use jq if available - printf "%s\n" "$response_body" | jq -r '.extracted_folders[] | "\(.agency_id):\(.folder_path)"' 2>/dev/null | while IFS=':' read -r agency_id folder_path; do - if [ -n "$agency_id" ] && [ -n "$folder_path" ]; then + # Use jq if available - more reliable + echo "$response_body" | jq -r '.extracted_folders[]? | "\(.agency_id):\(.agency_name):\(.folder_path)"' 2>/dev/null | while IFS=':' read -r agency_id agency_name folder_path; do + if [ -n "$agency_id" ] && [ -n "$agency_name" ] && [ -n "$folder_path" ]; then if [ "$first_entry" = false ]; then echo ',' >> "$temp_payload" fi echo " {" >> "$temp_payload" echo " \"agency_id\": \"$agency_id\"," >> "$temp_payload" + echo " \"agency_name\": \"$agency_name\"," >> "$temp_payload" echo " \"data_path\": \"$folder_path\"," >> "$temp_payload" echo " \"output_filename\": \"$CURRENT_DATASET_ID\"" >> "$temp_payload" echo " }" >> "$temp_payload" @@ -82,20 +111,23 @@ if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then fi done else - # Fallback parsing without jq + # Fallback parsing without jq - improved regex log "⚠️ jq not available, using grep/sed for parsing" - # Extract agency_id and folder_path pairs using grep and sed - printf "%s\n" "$response_body" | grep -o '"agency_id":"[^"]*","folder_path":"[^"]*"' | while read -r folder_info; do - agency_id=$(echo "$folder_info" | sed 's/.*"agency_id":"\([^"]*\)".*/\1/') - folder_path=$(echo "$folder_info" | sed 's/.*"folder_path":"\([^"]*\)".*/\1/') + # Clean the response body and extract agency_id and folder_path pairs + cleaned_response=$(echo "$response_body" | tr -d '\n\r\t' | tr -s ' ') + echo "$cleaned_response" | grep -o '"agency_id"[[:space:]]*:[[:space:]]*"[^"]*"[[:space:]]*,[[:space:]]*"agency_name"[[:space:]]*:[[:space:]]*"[^"]*"[[:space:]]*,[[:space:]]*"folder_path"[[:space:]]*:[[:space:]]*"[^"]*"' | while read -r folder_info; do + agency_id=$(echo "$folder_info" | sed 's/.*"agency_id"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/') + agency_name=$(echo "$folder_info" | sed 's/.*"agency_name"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/') + folder_path=$(echo "$folder_info" | sed 's/.*"folder_path"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/') - if [ -n "$agency_id" ] && [ -n "$folder_path" ]; then + if [ -n "$agency_id" ] && [ -n "$agency_name" ] && [ -n "$folder_path" ]; then if [ "$first_entry" = false ]; then echo ',' >> "$temp_payload" fi echo " {" >> "$temp_payload" echo " \"agency_id\": \"$agency_id\"," >> "$temp_payload" + echo " \"agency_name\": \"$agency_name\"," >> "$temp_payload" echo " \"data_path\": \"$folder_path\"," >> "$temp_payload" echo " \"output_filename\": \"$CURRENT_DATASET_ID\"" >> "$temp_payload" echo " }" >> "$temp_payload" @@ -122,8 +154,6 @@ if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then dataset_http_code="$dataset_response" dataset_response_body=$(cat /tmp/dataset_response_body.txt) - log "🔍 Dataset Generation HTTP Status Code: $dataset_http_code" - log "🔍 Dataset Generation HTTP Status Code: $dataset_http_code" log "🔍 Dataset Generation Response: $dataset_response_body" @@ -136,7 +166,7 @@ if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then log "HTTP Status: $dataset_http_code" log "Error response: $dataset_response_body" # Cleanup temp files - rm -f /tmp/dataset_payload.json /tmp/dataset_response_body.txt 
/tmp/response_body.txt + rm -f /tmp/dataset_payload.json /tmp/dataset_response_body.txt /tmp/download_response.json exit 1 fi @@ -149,20 +179,21 @@ if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then else log "❌ S3 download failed - success status: $success_status" log "Response: $response_body" + rm -f /tmp/download_response.json exit 1 fi -elif [ "$http_code" != "200" ]; then - log "❌ API call failed with HTTP status: $http_code" - log "Response: $response_body" - exit 1 else - log "❌ API call failed - no response received" + log "❌ Python script execution failed with exit code: $exit_code" + if [ -f "$temp_response" ]; then + log "Error response: $(cat $temp_response)" + rm -f /tmp/download_response.json + fi exit 1 fi # Cleanup temp file -rm -f /tmp/response_body.txt +rm -f /tmp/download_response.json log "🎉 S3 Dataset Processing script completed successfully" log "📋 Note: Dataset generation is running as a background task" diff --git a/DSL/CronManager/script/fetch_multi_chunk.sh b/DSL/CronManager/script/fetch_multi_chunk.sh new file mode 100644 index 00000000..3f719e6c --- /dev/null +++ b/DSL/CronManager/script/fetch_multi_chunk.sh @@ -0,0 +1,120 @@ +#!/bin/bash + +echo "Started Shell Script for Multi-Chunk Download and Aggregation" + +# Check if environment variables are set +if [ -z "$datasetId" ] || [ -z "$chunkIds" ]; then + echo "Please set the datasetId and chunkIds environment variables." + exit 1 +fi + +# Logging function +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" >&2 +} + +log "Multi-chunk download request started" +log "Dataset ID: $datasetId" +log "Chunk IDs: $chunkIds" + +# Clean the parameters +DATASET_ID=$(echo "$datasetId" | tr -d '"') +CHUNK_IDS=$(echo "$chunkIds" | tr -d '"') + +log "Cleaned Dataset ID: $DATASET_ID" +log "Cleaned Chunk IDs: $CHUNK_IDS" + +# Validate chunk IDs format +if [[ ! "$CHUNK_IDS" =~ ^[0-9]+([[:space:]]+[0-9]+)*$ ]]; then + log "❌ Invalid chunk IDs format. Expected space-separated numbers." + error_response="{\"success\": false, \"dataset_id\": \"$DATASET_ID\", \"chunk_ids\": \"$CHUNK_IDS\", \"error\": \"Invalid chunk IDs format\", \"message\": \"Expected space-separated numbers like '1 2 3'\"}" + echo "$error_response" + exit 1 +fi + +# Create temp_chunks directory if it doesn't exist +mkdir -p /app/temp_chunks +log "Created/verified temp_chunks directory" + +# Install required Python packages if not present +log "🔍 Installing required Python packages..." +python3 -m pip install --quiet --no-cache-dir requests pydantic || { + log "❌ Failed to install packages" + exit 1 +} +log "✅ Required packages installed" + +# Direct Python script path for downloading multiple chunks (inside container) +DOWNLOAD_SCRIPT="/app/src/s3_dataset_processor/fetch_multi_chunk.py" + +log "🔍 Calling Python script to download and aggregate chunks..." + +# Create temporary file for response +temp_response="/tmp/multi_chunk_response.json" + +# Call the Python script +python3 "$DOWNLOAD_SCRIPT" \ + --dataset-id "$DATASET_ID" \ + --chunk-ids "$CHUNK_IDS" \ + --output-json "$temp_response" + +exit_code=$? 
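+# The helper exits 0 only when aggregation succeeded and 1 otherwise (see
+# fetch_multi_chunk.py), so $exit_code distinguishes a crashed script from a
+# failure reported inside the JSON response.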
+log "🔍 Python script exit code: $exit_code" + +if [ "$exit_code" -eq 0 ] && [ -f "$temp_response" ]; then + log "✅ Multi-chunk processing successful" + + response_body=$(cat "$temp_response") + + # Check if aggregation was successful + success_check=$(echo "$response_body" | grep -o '"success"[[:space:]]*:[[:space:]]*true' | wc -l) + + if [ "$success_check" -gt 0 ]; then + log "✅ Chunks aggregated successfully" + + # Extract summary information for logging + if command -v jq >/dev/null 2>&1; then + total_items=$(echo "$response_body" | jq -r '.download_summary.total_items_aggregated // 0' 2>/dev/null || echo "0") + successful_chunks=$(echo "$response_body" | jq -r '.download_summary.successful_downloads // 0' 2>/dev/null || echo "0") + failed_chunks=$(echo "$response_body" | jq -r '.download_summary.failed_downloads // 0' 2>/dev/null || echo "0") + + log "📊 Aggregation Summary:" + log " - Total items aggregated: $total_items" + log " - Successful chunk downloads: $successful_chunks" + log " - Failed chunk downloads: $failed_chunks" + else + log "📊 Multi-chunk aggregation completed (install jq for detailed summary)" + fi + + # Output the JSON response to stdout (this goes to CronManager caller) + cat "$temp_response" + + # Cleanup + rm -f "$temp_response" + + log "✅ Multi-chunk aggregation completed successfully" + exit 0 + else + log "❌ Multi-chunk aggregation failed - check response for details" + + # Still output the response so caller can see the error + cat "$temp_response" + + # Cleanup + rm -f "$temp_response" + exit 1 + fi + +else + log "❌ Python script execution failed with exit code: $exit_code" + + # Create error response + error_response="{\"success\": false, \"dataset_id\": \"$DATASET_ID\", \"chunk_ids\": \"$CHUNK_IDS\", \"error\": \"Script execution failed\", \"message\": \"Python script failed with exit code $exit_code\"}" + echo "$error_response" + + if [ -f "$temp_response" ]; then + log "Error response: $(cat $temp_response)" + rm -f "$temp_response" + fi + exit 1 +fi \ No newline at end of file diff --git a/DSL/CronManager/script/fetch_single_chunk.sh b/DSL/CronManager/script/fetch_single_chunk.sh new file mode 100644 index 00000000..77df4869 --- /dev/null +++ b/DSL/CronManager/script/fetch_single_chunk.sh @@ -0,0 +1,95 @@ +#!/bin/bash + +echo "Started Shell Script for Chunk Download" + +# Check if environment variables are set +if [ -z "$datasetId" ] || [ -z "$pageNum" ]; then + echo "Please set the datasetId and pageNum environment variables." + exit 1 +fi + +# Logging function +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" >&2 +} + +log "Chunk download request started" +log "Dataset ID: $datasetId" +log "Page Number: $pageNum" + +# Clean the parameters +DATASET_ID=$(echo "$datasetId" | tr -d '"') +PAGE_NUM=$(echo "$pageNum" | tr -d '"') + +log "Cleaned Dataset ID: $DATASET_ID" +log "Cleaned Page Number: $PAGE_NUM" + +# Install required Python packages if not present +log "🔍 Installing required Python packages..." +python3 -m pip install --quiet --no-cache-dir requests pydantic || { + log "❌ Failed to install packages" + exit 1 +} +log "✅ Required packages installed" + +# Direct Python script path for downloading chunk (inside container) +DOWNLOAD_SCRIPT="/app/src/s3_dataset_processor/fetch_chunk_without_filter.py" + +log "🔍 Calling Python script to download chunk..." 
+ +# Create temporary file for response +temp_response="/tmp/chunk_response.json" + +# Call the Python script +python3 "$DOWNLOAD_SCRIPT" \ + --dataset-id "$DATASET_ID" \ + --page-num "$PAGE_NUM" \ + --output-json "$temp_response" + +exit_code=$? +log "🔍 Python script exit code: $exit_code" + +if [ "$exit_code" -eq 0 ] && [ -f "$temp_response" ]; then + log "✅ Chunk download successful" + + response_body=$(cat "$temp_response") + log "🔍 Response: $response_body" + + # Check if download was successful + success_check=$(echo "$response_body" | grep -o '"success"[[:space:]]*:[[:space:]]*true' | wc -l) + + if [ "$success_check" -gt 0 ]; then + log "✅ Chunk downloaded successfully" + + # Output the JSON response to stdout (this goes to CronManager caller) + cat "$temp_response" + + # Cleanup + rm -f "$temp_response" + + log "✅ Chunk download completed successfully" + exit 0 + else + log "❌ Chunk download failed - check response for details" + + # Still output the response so caller can see the error + cat "$temp_response" + + # Cleanup + rm -f "$temp_response" + exit 1 + fi + +else + log "❌ Python script execution failed with exit code: $exit_code" + + # Create error response + error_response="{\"success\": false, \"dataset_id\": \"$DATASET_ID\", \"page_num\": $PAGE_NUM, \"error\": \"Script execution failed\", \"message\": \"Python script failed with exit code $exit_code\"}" + echo "$error_response" + + if [ -f "$temp_response" ]; then + log "Error response: $(cat "$temp_response")" + rm -f "$temp_response" + fi + exit 1 +fi \ No newline at end of file diff --git a/DSL/CronManager/script/python_test_script.sh b/DSL/CronManager/script/python_test_script.sh new file mode 100644 index 00000000..8f3474ee --- /dev/null +++ b/DSL/CronManager/script/python_test_script.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Keep stdout clean for the Python JSON output: the wrapper itself prints +# nothing, and the Python script's stderr is discarded below + +# Get parameters +TEST_PARAM="${testParam:-default}" +DELAY_SECONDS="${delaySeconds:-0}" + +# Export environment variables for Python script +export testParam="$TEST_PARAM" +export delaySeconds="$DELAY_SECONDS" + +# Execute Python script and capture ONLY its JSON output, discarding its stderr +python3 /app/src/s3_dataset_processor/python_test.py 2>/dev/null + +# Exit with Python script's exit code +exit $? \ No newline at end of file diff --git a/DSL/DatasetGenerator/config/config.yaml b/DSL/DatasetGenerator/config/config.yaml index d08e37c9..d9c5b2e7 100644 --- a/DSL/DatasetGenerator/config/config.yaml +++ b/DSL/DatasetGenerator/config/config.yaml @@ -33,13 +33,24 @@ dataset_generation: prompt_template_name: "institute_topic_question" traversal_strategy: "recursive" output_format: "json" - num_samples: 10 + num_samples: 5 post_processing: "aggregation" # Options: "zip", "aggregation" # Aggregation-specific configuration (only used when post_processing = "aggregation") aggregation: output_filename: "12" merge_strategy: "combine_arrays" include_metadata: true + field_mapping: + enabled: true + payload_to_output: + agency_name: agency_name + agency_id: agency_id + defaults: + id: auto_increment + content_fields: + - question + +# Language and style settings for dataset generation parameters: language: "et" temperature: 0.7 @@ -47,6 +58,7 @@ dataset_generation: language_name: "Estonian" difficulty: "medium" style: "clear and concise" + system_prompt: "You are a helpful assistant for generating synthetic questions for given contexts."
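+  # The parameters above are substituted into the ${...} placeholders of
+  # user_configs/prompts/institute_topic_question.txt (see its diff below).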
 filter: {} # Processing settings @@ -70,4 +82,28 @@ data_sources: callback: url: "http://ruuter-public:8086/global-classifier/data/callback" max_retries: 3 - timeout: 30 \ No newline at end of file + timeout: 30 + +# Relevance Score Analysis +relevance_score: + enabled: true + embedding_model: "paraphrase-multilingual-mpnet-base-v2" + segment_weight: 0.6 + query_weight: 0.3 + term_weight: 0.1 + threshold_good: 0.7 + threshold_acceptable: 0.5 + min_df: 1 + max_df: 0.9 + ngram_range: (1, 2) + +# Information Coverage Analysis +information_coverage: + enabled: true + similarity_threshold: 0.5 + +# Model settings +models: + embedding_model: "paraphrase-multilingual-mpnet-base-v2" + qualitative_model: "google/gemma-2-2b-it" + use_4bit_quantization: true \ No newline at end of file diff --git a/DSL/DatasetGenerator/user_configs/prompts/institute_topic_question.txt b/DSL/DatasetGenerator/user_configs/prompts/institute_topic_question.txt index 58ae3b37..6958c041 100644 --- a/DSL/DatasetGenerator/user_configs/prompts/institute_topic_question.txt +++ b/DSL/DatasetGenerator/user_configs/prompts/institute_topic_question.txt @@ -1,23 +1,47 @@ ${system_prompt} -Generate a natural and contextually accurate question only in ${language_name} based on the following file content. +Generate a realistic user question in ${language_name} that someone would naturally ask a chatbot about the following topic. The question should clearly relate to the agency's services and demonstrate clear intent. -File: ${file_name} -Content: -""" -${file_content} -""" -Difficulty: ${difficulty:-medium} -Style: ${style:-clear and concise} +*Topic Information:* +Topic: ${file_name} +Content: """${file_content}""" -Instructions: -- The question must reflect genuine curiosity a learner might have after reading the file content. -- It should be directly based on the information presented — no assumptions or off-topic queries. -- Use clear, natural phrasing, as if a student is asking a teacher for clarification. +*Generation Parameters:* +- Difficulty Level: ${difficulty} +- Query Style: ${style} +- Language: ${language_name} -Format the output as a single JSON object: -{"question": "Your question here?"} +*Question Requirements:* -IMPORTANT: Return ONLY the raw JSON object with no markdown formatting, code block markers, or any other text. +*Natural User Language:* +- Use conversational, everyday language that real users employ +- Include common ways people phrase questions to chatbots +- Mix formal and informal expressions naturally +- Vary question length and structure -Additional Instructions: ${additional_instructions:-Ensure the question is relevant, precise, and clearly rooted in the file content in ${language_name}.} \ No newline at end of file +*Question Types:* +- Information requests about services and procedures +- Help-seeking for specific problems or processes +- Procedural questions about steps and requirements +- Problem-solving queries about issues or complications + +*Content Accuracy:* +- Base questions strictly on the provided topic content +- Focus on services, information, or processes the agency handles +- Ensure questions are within the agency's scope of expertise +- Avoid topics outside the institute's domain + +*Output Format:* +Format the output as a single JSON object. + +IMPORTANT: Return ONLY a single JSON object with a 'question' field. Do not return an array or a code block.
Example: + {{"question": "What is ...?"}} + +*Critical Requirements:* +- No markdown formatting, code blocks, or explanatory text +- Use natural ${language_name} grammar and expressions +- Ensure valid JSON syntax +- Generate only questions appropriate for this specific agency + +*Additional Instructions* +Create questions that real users would ask when seeking help or information from this agency through a chatbot interface. \ No newline at end of file diff --git a/DSL/Ruuter.public/global-classifier/POST/data/generate.yml b/DSL/Ruuter.public/global-classifier/POST/data/generate.yml index 4fbfe37b..c45a5806 100644 --- a/DSL/Ruuter.public/global-classifier/POST/data/generate.yml +++ b/DSL/Ruuter.public/global-classifier/POST/data/generate.yml @@ -75,8 +75,6 @@ execute_cron_manager: signedUrls: ${encoded_urls} datasetId: ${dataset_id} result: cron_res - timeout: 30000 - limit: 1024 error: handle_cron_error next: validate_cron_response diff --git a/DSL/Ruuter.public/global-classifier/POST/data/test.yml b/DSL/Ruuter.public/global-classifier/POST/data/test.yml new file mode 100644 index 00000000..645cb282 --- /dev/null +++ b/DSL/Ruuter.public/global-classifier/POST/data/test.yml @@ -0,0 +1,69 @@ +declaration: + call: declare + version: 0.1 + description: "Test endpoint to check if CronManager waits for Python script completion" + method: post + accepts: json + returns: json + namespace: global-classifier + +log_request_1: + log: "🐍 [PYTHON COMPLETION TEST] Starting CronManager Python completion test" + next: log_request_2 + +log_request_2: + log: "🐍 [PYTHON COMPLETION TEST] This test checks if we get response AFTER Python script completes" + next: call_cron_manager + +call_cron_manager: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_CRON_MANAGER]/execute/test_pipeline/test_job" + query: + testParam: "python-completion-test" + delaySeconds: "8" + timeout: 60000 + result: cron_response + next: log_response_1 + +log_response_1: + log: "🐍 [PYTHON COMPLETION TEST] CronManager response received - Status: ${cron_response.response.statusCodeValue}" + next: log_response_2 + +log_response_2: + log: "🐍 [PYTHON COMPLETION TEST] Response body: ${cron_response.response.body}" + next: log_response_3 + +log_response_3: + log: "🐍 [PYTHON COMPLETION TEST] If this appears after 8+ seconds, CronManager waits for completion" + next: log_response_4 + +log_response_4: + log: "🐍 [PYTHON COMPLETION TEST] If this appears immediately (~1s), CronManager runs in background" + next: parse_python_response + +# log_response_5: +# log: ${cron_response.response.body} +# next: parse_python_response + +parse_python_response: + assign: + python_output: ${cron_response.response.body} + response_status: ${cron_response.response.statusCodeValue} + test_result: + testType: "Python Script Completion Test" + message: "Test completed - check timing and response content" + cronManagerResponse: + statusCode: ${response_status} + body: ${python_output} + testParameters: + delaySeconds: 8 + testParam: "python-completion-test" + timeout: 60000 + timing: + description: "If response contains Python script results with timing info, CronManager waited for completion" + expectation: "Response should contain start_time, end_time, and duration from Python script" + next: return_result + +return_result: + return: ${test_result} \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index b3fcbb37..3d5e78e7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -58,22 +58,22 @@ services: networks: - bykstack - 
tim: - container_name: tim - image: tim - depends_on: - - tim-postgresql - environment: - - SECURITY_ALLOWLIST_JWT=ruuter-private,ruuter-public,data-mapper,resql,tim,tim-postgresql,chat-widget,authentication-layer,127.0.0.1,::1 - - KEY_PASS=ppjjpp - ports: - - 8085:8085 - networks: - - bykstack - extra_hosts: - - "host.docker.internal:host-gateway" - cpus: "0.5" - mem_limit: "512M" + # tim: + # container_name: tim + # image: tim + # depends_on: + # - tim-postgresql + # environment: + # - SECURITY_ALLOWLIST_JWT=ruuter-private,ruuter-public,data-mapper,resql,tim,tim-postgresql,chat-widget,authentication-layer,127.0.0.1,::1 + # - KEY_PASS=ppjjpp + # ports: + # - 8085:8085 + # networks: + # - bykstack + # extra_hosts: + # - "host.docker.internal:host-gateway" + # cpus: "0.5" + # mem_limit: "512M" tim-postgresql: container_name: tim-postgresql @@ -90,13 +90,13 @@ services: networks: - bykstack - authentication-layer: - container_name: authentication-layer - image: authentication-layer - ports: - - 3004:3004 - networks: - - bykstack + # authentication-layer: + # container_name: authentication-layer + # image: authentication-layer + # ports: + # - 3004:3004 + # networks: + # - bykstack resql: container_name: resql @@ -144,12 +144,13 @@ services: cron-manager: container_name: cron-manager - image: cron-manager:latest + image: cron-manager-python:latest user: "root" volumes: - ./DSL/CronManager/DSL:/DSL - ./DSL/CronManager/script:/app/scripts - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets + - ./DSL/DatasetGenerator/temp_chunks:/app/temp_chunks - ./src/s3_dataset_processor:/app/src/s3_dataset_processor - ./DSL/DatasetGenerator/config:/app/config - cron_data:/app/data @@ -183,20 +184,20 @@ services: ports: - "11434:11434" environment: - # - NVIDIA_VISIBLE_DEVICES=all - # - OLLAMA_USE_GPU=1 + - NVIDIA_VISIBLE_DEVICES=all + - OLLAMA_USE_GPU=1 - OLLAMA_HOST=0.0.0.0 volumes: - dataset_gen_ollama_models:/root/.ollama - - ./src/dataset-generation/ollama-entrypoint.sh:/ollama-entrypoint.sh + - ./DSL/DatasetGenerator/ollama-entrypoint.sh:/ollama-entrypoint.sh entrypoint: ["bash", "/ollama-entrypoint.sh"] - # deploy: - # resources: - # reservations: - # devices: - # - driver: nvidia - # count: 1 - # capabilities: [gpu] + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] networks: - bykstack @@ -267,6 +268,7 @@ services: container_name: gc-s3-ferry volumes: - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets + - ./DSL/DatasetGenerator/temp_chunks:/app/temp_chunks env_file: - config.env ports: @@ -275,22 +277,33 @@ services: networks: - bykstack - s3-dataset-processor: - container_name: s3-dataset-processor - build: ./src/s3_dataset_processor + dataset-file-handler: + container_name: dataset-file-handler + build: + context: ./src/dataset_file_hanlder + dockerfile: Dockerfile ports: - - "8001:8001" + - "8003:8000" volumes: - - cron_data:/app/data # Same volume as cron-manager + - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets + - ./DSL/DatasetGenerator/temp_chunks:/app/temp_chunks + - ./src/dataset_file_hanlder:/app/src + - cron_data:/app/data environment: - - PORT=8001 + - PORT=8000 + - PYTHONPATH=/app/src + - LOG_LEVEL=INFO networks: - bykstack + depends_on: + - gc-s3-ferry + - resql healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8001/health"] + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] interval: 30s timeout: 10s retries: 3 + restart: unless-stopped # gui: # container_name: gui diff --git 
a/minio_presigned_urls.txt b/minio_presigned_urls.txt new file mode 100644 index 00000000..ac9ad656 --- /dev/null +++ b/minio_presigned_urls.txt @@ -0,0 +1,10 @@ +http://minio:9000/ckb/agencies/Politsei-_ja_Piirivalveamet/Politsei-_ja_Piirivalveamet.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250624%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250624T091734Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=94db13ede49dd83ec7007477084e784eef7affebe55581f69e8ee0c923b6bdd1|||http://minio:9000/ckb/agencies/ID.ee/ID.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250624%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250624T091734Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=3cd531c069dfe1a03df04e7d4cdab185d7aadbfedfc729db28be73e0a35c60fd + +Individual URLs: +================================================== +URL 1: +http://minio:9000/ckb/agencies/Politsei-_ja_Piirivalveamet/Politsei-_ja_Piirivalveamet.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250624%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250624T091734Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=94db13ede49dd83ec7007477084e784eef7affebe55581f69e8ee0c923b6bdd1 + +URL 2: +http://minio:9000/ckb/agencies/ID.ee/ID.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250624%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250624T091734Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=3cd531c069dfe1a03df04e7d4cdab185d7aadbfedfc729db28be73e0a35c60fd + diff --git a/minio_signed_urls.py b/minio_signed_urls.py new file mode 100644 index 00000000..26395082 --- /dev/null +++ b/minio_signed_urls.py @@ -0,0 +1,65 @@ +import boto3 +from botocore.client import Config + +# Create S3 client for MinIO +s3_client = boto3.client( + 's3', + endpoint_url='http://minio:9000', # Replace with your MinIO URL + aws_access_key_id='minioadmin', # Replace with your access key + aws_secret_access_key='minioadmin', # Replace with your secret key + config=Config(signature_version='s3v4'), # Hardcoded signature version + region_name='us-east-1' # MinIO usually works with any region +) + +# List of files to generate URLs for +files_to_process = [ + {'bucket': 'ckb', 'key': 'agencies/Politsei-_ja_Piirivalveamet/Politsei-_ja_Piirivalveamet.zip'}, + {'bucket': 'ckb', 'key': 'agencies/ID.ee/ID.zip'}, + # Add more files as needed + # {'bucket': 'ckb', 'key': 'agencies/another-agency/file.zip'}, +] + +# Generate presigned URLs +presigned_urls = [] + +print("Generating presigned URLs...") +for file_info in files_to_process: + try: + url = s3_client.generate_presigned_url( + ClientMethod='get_object', + Params={'Bucket': file_info['bucket'], 'Key': file_info['key']}, + ExpiresIn=24 * 3600 # 24 hours in seconds + ) + presigned_urls.append(url) + print(f"✅ Generated URL for: {file_info['key']}") + print(f" URL: {url}") + except Exception as e: + print(f"❌ Failed to generate URL for: {file_info['key']}") + print(f" Error: {str(e)}") + +output_file = 'minio_presigned_urls.txt' + +try: + with open(output_file, 'w') as f: + # Write URLs separated by ||| delimiter (for your script) + url_string = '|||'.join(presigned_urls) + f.write(url_string) + f.write('\n\n') + + # Also write each URL on separate lines for readability + f.write("Individual URLs:\n") + f.write("=" * 50 + "\n") + for i, url in enumerate(presigned_urls, 1): + f.write(f"URL {i}:\n{url}\n\n") + + print(f"\n✅ Presigned URLs saved to: {output_file}") + print(f"Total URLs generated: 
{len(presigned_urls)}") + + # Display the combined URL string for easy copying + if presigned_urls: + print("\nCombined URL string (for signedUrls environment variable):") + print("=" * 60) + print('|||'.join(presigned_urls)) + +except Exception as e: + print(f"❌ Failed to save URLs to file: {str(e)}") \ No newline at end of file diff --git a/src/s3_dataset_processor/Dockerfile b/src/dataset_file_hanlder/Dockerfile similarity index 60% rename from src/s3_dataset_processor/Dockerfile rename to src/dataset_file_hanlder/Dockerfile index 33a674cb..5773ab35 100644 --- a/src/s3_dataset_processor/Dockerfile +++ b/src/dataset_file_hanlder/Dockerfile @@ -13,15 +13,17 @@ COPY requirements.txt . # Install Python dependencies RUN pip install --no-cache-dir -r requirements.txt -# Copy application code -COPY . . +# Copy source code +COPY . /app/src + +# Set Python path +ENV PYTHONPATH=/app/src + -# Expose port -EXPOSE 8001 # Health check HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD curl -f http://localhost:8001/health || exit 1 + CMD curl -f http://localhost:8000/health || exit 1 -# Run the application - note the simplified module path -CMD ["uvicorn", "s3_processor_api:app", "--host", "0.0.0.0", "--port", "8001"] \ No newline at end of file +# Run the FastAPI application +CMD ["uvicorn", "chunks_handler_api:app", "--host", "0.0.0.0", "--port", "8000", "--reload"] \ No newline at end of file diff --git a/src/dataset_file_hanlder/__init__.py b/src/dataset_file_hanlder/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/dataset_file_hanlder/chunks_handler_api.py b/src/dataset_file_hanlder/chunks_handler_api.py new file mode 100644 index 00000000..3df10f2f --- /dev/null +++ b/src/dataset_file_hanlder/chunks_handler_api.py @@ -0,0 +1,93 @@ +"""FastAPI endpoints for chunk download operations.""" + +from fastapi import FastAPI, HTTPException +from loguru import logger + +from models.schemas import ( + ChunkDownloadRequest, + ChunkDownloadResponse, + MultiChunkDownloadRequest, + MultiChunkDownloadResponse +) +from single_chunk_handler import ChunkService +from multiple_chunk_handler import MultiChunkService + +# Create FastAPI app +app = FastAPI( + title="Dataset File Handler API", + description="API for handling dataset file operations including chunk downloads", + version="1.0.0", +) + +# Initialize services +chunk_service = ChunkService() +multi_chunk_service = MultiChunkService() + + +@app.post("/download-chunk", response_model=ChunkDownloadResponse) +async def download_chunk(request: ChunkDownloadRequest): + """ + Download a single chunk from S3. + + Args: + request: Chunk download request containing dataset_id and page_num + + Returns: + Chunk data or error information + """ + try: + logger.info(f"Chunk download request - Dataset: {request.dataset_id}, Page: {request.page_num}") + + result = chunk_service.download_chunk_from_s3(request.dataset_id, request.page_num) + + if not result["success"]: + raise HTTPException(status_code=400, detail=result) + + return ChunkDownloadResponse(**result) + + except HTTPException: + raise + except Exception as e: + logger.error(f"Internal error during chunk download: {str(e)}") + raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}") + + +@app.post("/download-multiple-chunks", response_model=MultiChunkDownloadResponse) +async def download_multiple_chunks(request: MultiChunkDownloadRequest): + """ + Download and aggregate multiple chunks from S3. 
+ + Args: + request: Multi-chunk download request containing dataset_id and chunk_ids + + Returns: + Aggregated chunk data or error information + """ + try: + logger.info(f"Multi-chunk download request - Dataset: {request.dataset_id}, Chunks: {request.chunk_ids}") + + if not request.chunk_ids: + raise HTTPException(status_code=400, detail="No chunk IDs provided") + + result = multi_chunk_service.download_multiple_chunks(request.dataset_id, request.chunk_ids) + + if not result["success"]: + raise HTTPException(status_code=400, detail=result) + + return MultiChunkDownloadResponse(**result) + + except HTTPException: + raise + except Exception as e: + logger.error(f"Internal error during multi-chunk download: {str(e)}") + raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}") + + +@app.get("/health") +async def chunk_health_check(): + """Health check endpoint for chunk services.""" + return {"status": "healthy", "service": "chunk-download"} + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/src/dataset_file_hanlder/config/__init__.py b/src/dataset_file_hanlder/config/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/dataset_file_hanlder/config/settings.py b/src/dataset_file_hanlder/config/settings.py new file mode 100644 index 00000000..9ccc2b67 --- /dev/null +++ b/src/dataset_file_hanlder/config/settings.py @@ -0,0 +1,27 @@ +"""Application configuration settings.""" + +import os + + +class Settings: + """Application settings and configuration.""" + + # API Configuration + API_TITLE = "S3 Dataset Processor API" + API_DESCRIPTION = "API for decoding and processing S3 presigned URLs" + API_VERSION = "1.0.0" + + # Directory Configuration + DATA_DIR = "/app/data" + + # Download Configuration + DOWNLOAD_TIMEOUT = 300 # 5 minutes + CHUNK_SIZE = 8192 + + def __init__(self): + """Initialize settings and create necessary directories.""" + os.makedirs(self.DATA_DIR, exist_ok=True) + + +# Global settings instance +settings = Settings() diff --git a/src/dataset_file_hanlder/download_source_dataset.py b/src/dataset_file_hanlder/download_source_dataset.py new file mode 100644 index 00000000..eeab901c --- /dev/null +++ b/src/dataset_file_hanlder/download_source_dataset.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 +""" +Direct Python script for downloading datasets from S3 signed URLs. +Replaces the FastAPI /download-datasets endpoint for CronManager execution.
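+
+Typical invocation (arguments defined in main() below):
+    python3 download_source_dataset.py --encoded-data <base64-payload> \
+        --extract-files --output-json /tmp/download_response.json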
+""" +import sys +import json +import argparse +import logging +from pathlib import Path +import traceback + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stdout)] +) +logger = logging.getLogger(__name__) + +# Add the s3_dataset_processor to Python path to import modules FIRST +# This path corresponds to the volume mount in docker-compose.yml +script_dir = Path('/app/src/s3_dataset_processor') +sys.path.insert(0, str(script_dir)) + +# Now import the services AFTER adding to path +try: + from services.url_decoder_service import URLDecoderService + from services.download_service import DownloadService + from services.extraction_service import ExtractionService + from handlers.response_handler import ResponseHandler + logger.info("✅ Successfully imported all required modules") +except ImportError as e: + logger.error(f"❌ Failed to import required modules: {e}") + logger.error(f"Python path: {sys.path}") + logger.error(f"Script directory exists: {script_dir.exists()}") + if script_dir.exists(): + logger.error(f"Contents of script directory: {list(script_dir.iterdir())}") + sys.exit(1) + +def main(): + """Main function to handle dataset download process.""" + parser = argparse.ArgumentParser(description='Download datasets from S3 signed URLs') + parser.add_argument('--encoded-data', required=True, help='Base64 encoded signed URLs data') + parser.add_argument('--extract-files', action='store_true', default=True, help='Extract downloaded files') + parser.add_argument('--output-json', help='Output file path for results JSON') + + args = parser.parse_args() + + try: + if not args.encoded_data or not isinstance(args.encoded_data, str): + logger.error("'encoded_data' must be a non-empty string") + sys.exit(1) + + logger.info("Initializing services...") + # Initialize services + url_decoder_service = URLDecoderService() + download_service = DownloadService() + extraction_service = ExtractionService() + response_handler = ResponseHandler() + + logger.info("Decoding signed URLs...") + # Decode the data using service + decoded_data = url_decoder_service.decode_signed_urls(args.encoded_data) + logger.info(f"Starting download for {len(decoded_data)} files") + + logger.info("Processing downloads...") + # Process downloads using service + downloaded_files, successful_downloads, failed_downloads = ( + download_service.process_downloads(decoded_data) + ) + + logger.info("Processing extractions...") + # Process extractions using service + extracted_folders = extraction_service.process_extractions( + downloaded_files, args.extract_files + ) + + logger.info("Formatting response...") + # Format response using handler + response = response_handler.format_download_response( + decoded_data, + downloaded_files, + successful_downloads, + failed_downloads, + extracted_folders, + ) + + # Output results + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(response.dict(), f, indent=2) + logger.info(f"Results written to {args.output_json}") + else: + print(json.dumps(response.dict(), indent=2)) + + # Log summary + logger.info(f"Download completed: {successful_downloads} successful, {failed_downloads} failed") + logger.info(f"Extracted folders: {len(extracted_folders)}") + + # Exit with appropriate code + sys.exit(0 if response.success else 1) + + except ValueError as e: + logger.error(f"Decoding error: {str(e)}") + traceback.print_exc() + sys.exit(1) + except 
Exception as e: + logger.error(f"Internal error: {str(e)}") + traceback.print_exc() + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/dataset_file_hanlder/fetch_chunk_without_filter.py b/src/dataset_file_hanlder/fetch_chunk_without_filter.py new file mode 100644 index 00000000..2aa5e54d --- /dev/null +++ b/src/dataset_file_hanlder/fetch_chunk_without_filter.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 +""" +Python script to download a specific chunk from S3 bucket and return as JSON. +Used by CronManager endpoint to fetch individual chunks. +""" +import sys +import json +import argparse +import logging +import os +import tempfile +from pathlib import Path +import traceback + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stderr)] # Log to stderr to keep stdout clean +) +logger = logging.getLogger(__name__) + +# Add the s3_dataset_processor to Python path to import modules +script_dir = Path('/app/src/s3_dataset_processor') +sys.path.insert(0, str(script_dir)) + +def log(message): + """Log to stderr to keep stdout clean for JSON output""" + logger.info(f"🔍 [CHUNK DOWNLOAD] {message}") + +try: + from services.s3_ferry_service import S3Ferry + s3_ferry_service = S3Ferry() + log("Successfully imported S3FerryService") +except ImportError as e: + log(f"Failed to import S3FerryService: {e}") + sys.exit(1) + +def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: + """ + Download a specific chunk from S3 bucket. + + Args: + dataset_id: Dataset ID + page_num: Page number (chunk number) + + Returns: + Dictionary containing chunk data or error information + """ + try: + log(f"Starting chunk download - Dataset ID: {dataset_id}, Page: {page_num}") + + # Create temporary directory for download + temp_dir = tempfile.mkdtemp(prefix="chunk_download_") + log(f"Created temporary directory: {temp_dir}") + + # Define S3 source path and local destination + chunk_filename = f"{page_num}.json" + s3_source_path = f"{dataset_id}/{chunk_filename}" + local_dest_path = f"temp_chunks/{chunk_filename}" + + # Create the temp_chunks directory if it doesn't exist + temp_chunks_dir = "temp_chunks" + os.makedirs(temp_chunks_dir, exist_ok=True) + log(f"Created/verified temp directory: {temp_chunks_dir}") + + log(f"S3 source path: {s3_source_path}") + log(f"Local destination: {local_dest_path}") + + # Download chunk from S3 using S3Ferry service + response = s3_ferry_service.transfer_file( + destination_file_path=local_dest_path, + destination_storage_type="FS", + source_file_path=s3_source_path, + source_storage_type="S3" + ) + + log(f"S3Ferry response status: {response.status_code}") + log(f"S3Ferry response body: {response.text}") + + if response.status_code in [200, 201]: + # Read the downloaded chunk file + local_file_path = f"/app/{local_dest_path}" + + if os.path.exists(local_file_path): + log(f"Successfully downloaded chunk to: {local_file_path}") + + # Read and parse the chunk data + with open(local_file_path, 'r', encoding='utf-8') as f: + chunk_data = json.load(f) + + # Clean up the downloaded file + os.remove(local_file_path) + log(f"Cleaned up downloaded file: {local_file_path}") + + # Remove empty directory if it exists + try: + os.rmdir(os.path.dirname(local_file_path)) + except OSError: + pass # Directory not empty or doesn't exist + + return { + "success": True, + "dataset_id": dataset_id, + "page_num": 
page_num, + "chunk_data": chunk_data, + "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}" + } + else: + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": f"Downloaded file not found at: {local_file_path}", + "message": "File download completed but file not accessible" + } + else: + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": f"S3 download failed: HTTP {response.status_code}", + "response_body": response.text, + "message": f"Failed to download chunk {page_num} from S3" + } + + except Exception as e: + log(f"Error during chunk download: {str(e)}") + traceback.print_exc() + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": str(e), + "message": "Internal error during chunk download" + } + +def main(): + """Main function to handle chunk download process.""" + parser = argparse.ArgumentParser(description='Download a specific chunk from S3') + parser.add_argument('--dataset-id', required=True, help='Dataset ID') + parser.add_argument('--page-num', required=True, type=int, help='Page number (chunk number)') + parser.add_argument('--output-json', help='Output file path for results JSON') + + args = parser.parse_args() + + try: + log(f"Processing chunk download request - Dataset: {args.dataset_id}, Page: {args.page_num}") + + # Download the chunk + result = download_chunk_from_s3(args.dataset_id, args.page_num) + + # Output results + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(result, f, indent=2) + log(f"Results written to {args.output_json}") + else: + # Output ONLY the JSON to stdout (this goes to CronManager) + print(json.dumps(result)) + + log(f"Chunk download completed - Success: {result['success']}") + + # Exit with appropriate code + sys.exit(0 if result['success'] else 1) + + except Exception as e: + log(f"Internal error: {str(e)}") + traceback.print_exc() + + error_result = { + "success": False, + "dataset_id": args.dataset_id, + "page_num": args.page_num, + "error": str(e), + "message": "Script execution failed" + } + + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(error_result, f, indent=2) + else: + print(json.dumps(error_result)) + + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/dataset_file_hanlder/fetch_multi_chunk.py b/src/dataset_file_hanlder/fetch_multi_chunk.py new file mode 100644 index 00000000..18cf8228 --- /dev/null +++ b/src/dataset_file_hanlder/fetch_multi_chunk.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python3 +""" +Python script to download multiple chunks from S3 bucket, aggregate them, and return as JSON. +Used by CronManager endpoint to fetch and combine multiple chunks. 
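+
+Typical invocation (arguments defined in main() below):
+    python3 fetch_multi_chunk.py --dataset-id <dataset-id> --chunk-ids "1 2 3" \
+        --output-json /tmp/multi_chunk_response.json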
+""" +import sys +import json +import argparse +import logging +import os +import tempfile +from pathlib import Path +import traceback +from typing import List, Dict, Any + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stderr)] # Log to stderr to keep stdout clean +) +logger = logging.getLogger(__name__) + +# Add the s3_dataset_processor to Python path to import modules +script_dir = Path('/app/src/s3_dataset_processor') +sys.path.insert(0, str(script_dir)) + +def log(message): + """Log to stderr to keep stdout clean for JSON output""" + logger.info(f"📦 [MULTI CHUNK] {message}") + +try: + from services.s3_ferry_service import S3Ferry + s3_ferry_service = S3Ferry() + log("Successfully imported S3FerryService") +except ImportError as e: + log(f"Failed to import S3FerryService: {e}") + sys.exit(1) + +def download_single_chunk_from_s3(dataset_id: str, chunk_id: int) -> Dict[str, Any]: + """ + Download a single chunk from S3 bucket. + + Args: + dataset_id: Dataset ID + chunk_id: Chunk ID/number + + Returns: + Dictionary containing chunk data or error information + """ + try: + log(f"Downloading chunk {chunk_id} from dataset {dataset_id}") + + # Define S3 source path and local destination + chunk_filename = f"{chunk_id}.json" + s3_source_path = f"{dataset_id}/{chunk_filename}" + local_dest_path = f"temp_chunks/{chunk_filename}" + + # Create the temp_chunks directory if it doesn't exist + temp_chunks_dir = "/app/temp_chunks" + os.makedirs(temp_chunks_dir, exist_ok=True) + + log(f"S3 source path: {s3_source_path}") + log(f"Local destination: {local_dest_path}") + + # Download chunk from S3 using S3Ferry service + response = s3_ferry_service.transfer_file( + destination_file_path=local_dest_path, + destination_storage_type="FS", + source_file_path=s3_source_path, + source_storage_type="S3" + ) + + log(f"S3Ferry response status for chunk {chunk_id}: {response.status_code}") + + if response.status_code in [200, 201]: + # Read the downloaded chunk file + local_file_path = f"/app/{local_dest_path}" + + if os.path.exists(local_file_path): + log(f"Successfully downloaded chunk {chunk_id} to: {local_file_path}") + + # Read and parse the chunk data + with open(local_file_path, 'r', encoding='utf-8') as f: + chunk_data = json.load(f) + + # Clean up the downloaded file + os.remove(local_file_path) + log(f"Cleaned up downloaded file: {local_file_path}") + + return { + "success": True, + "chunk_id": chunk_id, + "chunk_data": chunk_data, + "message": f"Successfully downloaded chunk {chunk_id}" + } + else: + return { + "success": False, + "chunk_id": chunk_id, + "error": f"Downloaded file not found at: {local_file_path}", + "message": f"Chunk {chunk_id} download completed but file not accessible" + } + else: + return { + "success": False, + "chunk_id": chunk_id, + "error": f"S3 download failed: HTTP {response.status_code}", + "response_body": response.text, + "message": f"Failed to download chunk {chunk_id} from S3" + } + + except Exception as e: + log(f"Error downloading chunk {chunk_id}: {str(e)}") + traceback.print_exc() + return { + "success": False, + "chunk_id": chunk_id, + "error": str(e), + "message": f"Internal error during chunk {chunk_id} download" + } + +def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, Any]: + """ + Download multiple chunks from S3 and aggregate them. 
+ + Args: + dataset_id: Dataset ID + chunk_ids: List of chunk IDs to download + + Returns: + Dictionary containing aggregated chunk data or error information + """ + try: + log(f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}") + + download_results = [] + successful_chunks = [] + failed_chunks = [] + aggregated_data = [] + total_items = 0 + + # Download each chunk + for chunk_id in chunk_ids: + result = download_single_chunk_from_s3(dataset_id, chunk_id) + download_results.append(result) + + if result["success"]: + successful_chunks.append(chunk_id) + chunk_data = result["chunk_data"] + + # Extract data array from chunk + chunk_items = chunk_data.get("data", []) + aggregated_data.extend(chunk_items) + total_items += len(chunk_items) + + log(f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation") + else: + failed_chunks.append(chunk_id) + log(f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}") + + # Prepare chunk info from the first successful chunk (if any) + chunk_info = {} + if successful_chunks and download_results: + first_successful = next((r for r in download_results if r["success"]), None) + if first_successful: + original_chunk_info = first_successful["chunk_data"].get("chunk_info", {}) + chunk_info = { + "original_dataset": original_chunk_info.get("original_dataset", dataset_id), + "requested_chunks": chunk_ids, + "successful_chunks": successful_chunks, + "failed_chunks": failed_chunks, + "total_chunks_requested": len(chunk_ids), + "successful_downloads": len(successful_chunks), + "failed_downloads": len(failed_chunks), + "total_aggregated_items": total_items, + "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" if successful_chunks else "none" + } + + # Prepare the final aggregated payload + if successful_chunks: + aggregated_payload = { + "success": True, + "dataset_id": dataset_id, + "chunk_info": chunk_info, + "aggregated_data": aggregated_data, + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": len(successful_chunks), + "failed_downloads": len(failed_chunks), + "successful_chunk_ids": successful_chunks, + "failed_chunk_ids": failed_chunks, + "total_items_aggregated": total_items + }, + "download_details": download_results, + "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested chunks" + } + else: + aggregated_payload = { + "success": False, + "dataset_id": dataset_id, + "chunk_info": chunk_info, + "aggregated_data": [], + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": 0, + "failed_downloads": len(failed_chunks), + "successful_chunk_ids": [], + "failed_chunk_ids": failed_chunks, + "total_items_aggregated": 0 + }, + "download_details": download_results, + "error": "All chunk downloads failed", + "message": f"Failed to download any of the {len(chunk_ids)} requested chunks" + } + + log(f"Multi-chunk aggregation completed - Success: {aggregated_payload['success']}") + log(f"Total items aggregated: {total_items}") + + return aggregated_payload + + except Exception as e: + log(f"Error during multi-chunk aggregation: {str(e)}") + traceback.print_exc() + return { + "success": False, + "dataset_id": dataset_id, + "chunk_info": {}, + "aggregated_data": [], + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": 0, + "failed_downloads": len(chunk_ids), + "successful_chunk_ids": [], + "failed_chunk_ids": chunk_ids, + 
"total_items_aggregated": 0 + }, + "error": str(e), + "message": "Internal error during multi-chunk aggregation" + } + +def parse_chunk_ids(chunk_ids_str: str) -> List[int]: + """ + Parse chunk IDs from string format "1 2 3" to list [1, 2, 3]. + + Args: + chunk_ids_str: String containing space-separated chunk IDs + + Returns: + List of integer chunk IDs + """ + try: + # Split by spaces and convert to integers + chunk_ids = [int(chunk_id.strip()) for chunk_id in chunk_ids_str.split() if chunk_id.strip()] + log(f"Parsed chunk IDs: {chunk_ids}") + return chunk_ids + except ValueError as e: + log(f"Error parsing chunk IDs '{chunk_ids_str}': {str(e)}") + raise ValueError(f"Invalid chunk IDs format. Expected space-separated integers, got: '{chunk_ids_str}'") + +def main(): + """Main function to handle multi-chunk download and aggregation process.""" + parser = argparse.ArgumentParser(description='Download and aggregate multiple chunks from S3') + parser.add_argument('--dataset-id', required=True, help='Dataset ID') + parser.add_argument('--chunk-ids', required=True, help='Space-separated chunk IDs (e.g., "1 2 3")') + parser.add_argument('--output-json', help='Output file path for results JSON') + + args = parser.parse_args() + + try: + log(f"Processing multi-chunk request - Dataset: {args.dataset_id}, Chunk IDs: {args.chunk_ids}") + + # Parse chunk IDs + chunk_ids = parse_chunk_ids(args.chunk_ids) + + if not chunk_ids: + raise ValueError("No valid chunk IDs provided") + + # Download and aggregate chunks + result = download_multiple_chunks(args.dataset_id, chunk_ids) + + # Output results + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(result, f, indent=2) + log(f"Results written to {args.output_json}") + else: + # Output ONLY the JSON to stdout (this goes to CronManager) + print(json.dumps(result)) + + log(f"Multi-chunk processing completed - Success: {result['success']}") + + # Exit with appropriate code + sys.exit(0 if result['success'] else 1) + + except Exception as e: + log(f"Internal error: {str(e)}") + traceback.print_exc() + + error_result = { + "success": False, + "dataset_id": args.dataset_id, + "chunk_ids": args.chunk_ids, + "error": str(e), + "message": "Script execution failed" + } + + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(error_result, f, indent=2) + else: + print(json.dumps(error_result)) + + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/dataset_file_hanlder/handlers/__init__.py b/src/dataset_file_hanlder/handlers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/dataset_file_hanlder/handlers/response_handler.py b/src/dataset_file_hanlder/handlers/response_handler.py new file mode 100644 index 00000000..19264ea7 --- /dev/null +++ b/src/dataset_file_hanlder/handlers/response_handler.py @@ -0,0 +1,41 @@ +"""Handler for API response formatting.""" + +from typing import List, Dict, Any + +from models.schemas import DownloadResponse, DownloadedFile + + +class ResponseHandler: + """Handler class for formatting API responses.""" + + @staticmethod + def format_download_response( + decoded_data: List[Dict[str, Any]], + downloaded_files: List[DownloadedFile], + successful_downloads: int, + failed_downloads: int, + extracted_folders: List[Dict[str, str]], + ) -> DownloadResponse: + """ + Format download response. 
+ + Args: + decoded_data: Original decoded data + downloaded_files: List of downloaded files + successful_downloads: Number of successful downloads + failed_downloads: Number of failed downloads + extracted_folders: List of extracted folder information + + Returns: + Formatted download response + """ + return DownloadResponse( + success=successful_downloads > 0, + message=f"Downloaded {successful_downloads} files, {failed_downloads} failed", + total_downloads=len(decoded_data), + successful_downloads=successful_downloads, + failed_downloads=failed_downloads, + downloaded_files=downloaded_files, + extracted_folders=extracted_folders, + total_extracted_folders=len(extracted_folders), + ) diff --git a/src/dataset_file_hanlder/models/__init__.py b/src/dataset_file_hanlder/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/dataset_file_hanlder/models/schemas.py b/src/dataset_file_hanlder/models/schemas.py new file mode 100644 index 00000000..c31722ce --- /dev/null +++ b/src/dataset_file_hanlder/models/schemas.py @@ -0,0 +1,69 @@ +"""Pydantic models for API requests and responses.""" + +from pydantic import BaseModel +from typing import List, Dict, Optional, Any + + +class DownloadRequest(BaseModel): + """Request model for downloading files from signed URLs.""" + + encoded_data: str + extract_files: Optional[bool] = True + + +class DownloadedFile(BaseModel): + """Model for downloaded file information.""" + + agency_id: str + agency_name: str + original_filename: str + local_path: str + file_size: int + extracted_path: Optional[str] = None + extraction_success: bool = False + + +class DownloadResponse(BaseModel): + """Response model for download operation.""" + + success: bool + message: str + total_downloads: int + successful_downloads: int + failed_downloads: int + downloaded_files: List[DownloadedFile] + extracted_folders: List[Dict[str, str]] + total_extracted_folders: int + +class ChunkDownloadRequest(BaseModel): + """Request model for downloading a single chunk.""" + dataset_id: str + page_num: int + + +class MultiChunkDownloadRequest(BaseModel): + """Request model for downloading multiple chunks.""" + dataset_id: str + chunk_ids: List[int] + + +class ChunkDownloadResponse(BaseModel): + """Response model for single chunk download.""" + success: bool + dataset_id: str + page_num: Optional[int] = None + chunk_data: Optional[Dict[str, Any]] = None + error: Optional[str] = None + message: str + + +class MultiChunkDownloadResponse(BaseModel): + """Response model for multi-chunk download and aggregation.""" + success: bool + dataset_id: str + chunk_info: Optional[Dict[str, Any]] = None + aggregated_data: List[Dict[str, Any]] + download_summary: Dict[str, Any] + download_details: Optional[List[Dict[str, Any]]] = None + error: Optional[str] = None + message: str diff --git a/src/dataset_file_hanlder/multiple_chunk_handler.py b/src/dataset_file_hanlder/multiple_chunk_handler.py new file mode 100644 index 00000000..17703ff7 --- /dev/null +++ b/src/dataset_file_hanlder/multiple_chunk_handler.py @@ -0,0 +1,234 @@ +"""Service for handling multiple chunk downloads and aggregation from S3.""" + +import json +import os +import traceback +from typing import List, Dict, Any +from loguru import logger + +from services.s3_ferry_service import S3Ferry + + +class MultiChunkService: + """Service class for handling multiple chunk operations.""" + + def __init__(self): + """Initialize the multi-chunk service.""" + self.s3_ferry_service = S3Ferry() + logger.info("MultiChunkService 
initialized") + + def download_single_chunk_from_s3(self, dataset_id: str, chunk_id: int) -> Dict[str, Any]: + """ + Download a single chunk from S3 bucket. + + Args: + dataset_id: Dataset ID + chunk_id: Chunk ID to download + + Returns: + Dictionary containing download result + """ + try: + logger.info(f"Downloading chunk {chunk_id} for dataset {dataset_id}") + + # Define S3 source path and local destination + chunk_filename = f"{chunk_id}.json" + s3_source_path = f"{dataset_id}/{chunk_filename}" + local_dest_path = f"temp_chunks/{chunk_filename}" + + # Create the temp_chunks directory if it doesn't exist + os.makedirs("temp_chunks", exist_ok=True) + + # Download chunk from S3 using S3Ferry service + response = self.s3_ferry_service.transfer_file( + destination_file_path=local_dest_path, + destination_storage_type="FS", + source_file_path=s3_source_path, + source_storage_type="S3" + ) + + logger.info(f"S3Ferry response status for chunk {chunk_id}: {response.status_code}") + + if response.status_code in [200, 201]: + # Read the downloaded chunk file + local_file_path = f"/app/{local_dest_path}" + + if os.path.exists(local_file_path): + logger.info(f"Successfully downloaded chunk {chunk_id} to: {local_file_path}") + + # Read and parse the chunk data + with open(local_file_path, 'r', encoding='utf-8') as f: + chunk_data = json.load(f) + + # Clean up the downloaded file + os.remove(local_file_path) + logger.info(f"Cleaned up downloaded file: {local_file_path}") + + return { + "success": True, + "chunk_id": chunk_id, + "chunk_data": chunk_data, + "message": f"Successfully downloaded chunk {chunk_id}" + } + else: + return { + "success": False, + "chunk_id": chunk_id, + "error": f"Downloaded file not found at: {local_file_path}", + "message": f"Chunk {chunk_id} download completed but file not accessible" + } + else: + return { + "success": False, + "chunk_id": chunk_id, + "error": f"S3 download failed: HTTP {response.status_code}", + "response_body": response.text, + "message": f"Failed to download chunk {chunk_id} from S3" + } + + except Exception as e: + logger.error(f"Error downloading chunk {chunk_id}: {str(e)}") + traceback.print_exc() + return { + "success": False, + "chunk_id": chunk_id, + "error": str(e), + "message": f"Internal error during chunk {chunk_id} download" + } + + def download_multiple_chunks(self, dataset_id: str, chunk_ids: List[int]) -> Dict[str, Any]: + """ + Download multiple chunks from S3 and aggregate them. 
+ + Args: + dataset_id: Dataset ID + chunk_ids: List of chunk IDs to download + + Returns: + Dictionary containing aggregated chunk data or error information + """ + try: + logger.info(f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}") + + download_results = [] + successful_chunks = [] + failed_chunks = [] + aggregated_data = [] + total_items = 0 + + # Download each chunk + for chunk_id in chunk_ids: + result = self.download_single_chunk_from_s3(dataset_id, chunk_id) + download_results.append(result) + + if result["success"]: + successful_chunks.append(chunk_id) + chunk_data = result["chunk_data"] + + # Extract data array from chunk + chunk_items = chunk_data.get("data", []) + aggregated_data.extend(chunk_items) + total_items += len(chunk_items) + + logger.info(f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation") + else: + failed_chunks.append(chunk_id) + logger.error(f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}") + + # Prepare chunk info from the first successful chunk (if any) + chunk_info = {} + if successful_chunks and download_results: + first_successful = next((r for r in download_results if r["success"]), None) + if first_successful: + original_chunk_info = first_successful["chunk_data"].get("chunk_info", {}) + chunk_info = { + "original_dataset": original_chunk_info.get("original_dataset", dataset_id), + "requested_chunks": chunk_ids, + "successful_chunks": successful_chunks, + "failed_chunks": failed_chunks, + "total_chunks_requested": len(chunk_ids), + "successful_downloads": len(successful_chunks), + "failed_downloads": len(failed_chunks), + "total_aggregated_items": total_items, + "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" if successful_chunks else "none" + } + + # Prepare the final aggregated payload + if successful_chunks: + aggregated_payload = { + "success": True, + "dataset_id": dataset_id, + "chunk_info": chunk_info, + "aggregated_data": aggregated_data, + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": len(successful_chunks), + "failed_downloads": len(failed_chunks), + "successful_chunk_ids": successful_chunks, + "failed_chunk_ids": failed_chunks, + "total_items_aggregated": total_items + }, + "download_details": download_results, + "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested chunks" + } + else: + aggregated_payload = { + "success": False, + "dataset_id": dataset_id, + "chunk_info": chunk_info, + "aggregated_data": [], + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": 0, + "failed_downloads": len(failed_chunks), + "successful_chunk_ids": [], + "failed_chunk_ids": failed_chunks, + "total_items_aggregated": 0 + }, + "download_details": download_results, + "error": "All chunk downloads failed", + "message": f"Failed to download any of the {len(chunk_ids)} requested chunks" + } + + logger.info(f"Multi-chunk aggregation completed - Success: {aggregated_payload['success']}") + logger.info(f"Total items aggregated: {total_items}") + + return aggregated_payload + + except Exception as e: + logger.error(f"Error during multi-chunk aggregation: {str(e)}") + traceback.print_exc() + return { + "success": False, + "dataset_id": dataset_id, + "chunk_info": {}, + "aggregated_data": [], + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": 0, + "failed_downloads": len(chunk_ids), + "successful_chunk_ids": [], + 
"failed_chunk_ids": chunk_ids, + "total_items_aggregated": 0 + }, + "error": str(e), + "message": "Internal error during multi-chunk aggregation" + } + + def parse_chunk_ids(self, chunk_ids_str: str) -> List[int]: + """ + Parse chunk IDs from string format. + + Args: + chunk_ids_str: Space-separated chunk IDs (e.g., "1 2 3") + + Returns: + List of chunk IDs as integers + """ + try: + chunk_ids = [int(x.strip()) for x in chunk_ids_str.split() if x.strip().isdigit()] + logger.info(f"Parsed chunk IDs: {chunk_ids}") + return chunk_ids + except Exception as e: + logger.error(f"Error parsing chunk IDs '{chunk_ids_str}': {str(e)}") + return [] \ No newline at end of file diff --git a/src/s3_dataset_processor/requirements.txt b/src/dataset_file_hanlder/requirements.txt similarity index 80% rename from src/s3_dataset_processor/requirements.txt rename to src/dataset_file_hanlder/requirements.txt index c916b736..364fc7cf 100644 --- a/src/s3_dataset_processor/requirements.txt +++ b/src/dataset_file_hanlder/requirements.txt @@ -1,6 +1,7 @@ fastapi uvicorn[standard] pydantic -python-multipart +loguru requests -boto3 \ No newline at end of file +python-multipart +aiofiles \ No newline at end of file diff --git a/src/dataset_file_hanlder/services/__init__.py b/src/dataset_file_hanlder/services/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/dataset_file_hanlder/services/download_service.py b/src/dataset_file_hanlder/services/download_service.py new file mode 100644 index 00000000..1ca27964 --- /dev/null +++ b/src/dataset_file_hanlder/services/download_service.py @@ -0,0 +1,114 @@ +"""Service for file download operations.""" + +import os +import requests +from typing import List, Dict, Any +from urllib.parse import urlparse +from config.settings import settings +from models.schemas import DownloadedFile +import sys +import logging +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stdout)] +) +logger = logging.getLogger(__name__) + + +class DownloadService: + """Service class for handling file download operations.""" + + def __init__(self): + """Initialize the download service.""" + self.data_dir = settings.DATA_DIR + self.timeout = settings.DOWNLOAD_TIMEOUT + self.chunk_size = settings.CHUNK_SIZE + + def download_file(self, url: str, local_path: str) -> bool: + """ + Download a file from URL to local path. + + Args: + url: The presigned URL to download from + local_path: Local file path to save the file + + Returns: + True if download successful, False otherwise + """ + try: + response = requests.get(url, stream=True, timeout=self.timeout) + response.raise_for_status() + + # Ensure directory exists + os.makedirs(os.path.dirname(local_path), exist_ok=True) + + with open(local_path, "wb") as f: + for chunk in response.iter_content(chunk_size=self.chunk_size): + if chunk: + f.write(chunk) + + return True + except Exception as e: + logger.error(f"Failed to download {url}: {e}") + return False + + def process_downloads(self, decoded_data: List[Dict[str, Any]]) -> tuple: + """ + Process multiple downloads from decoded data. 
+
+        Args:
+            decoded_data: List of decoded URL data
+
+        Returns:
+            Tuple of (downloaded_files, successful_downloads, failed_downloads)
+        """
+        downloaded_files = []
+        successful_downloads = 0
+        failed_downloads = 0
+        try:
+            for entry in decoded_data:
+                agency_id = entry.get("agencyId", "unknown")
+                agency_name = entry.get("agencyName", "Unknown Agency")
+                signed_url = entry.get("dataUrl", "")
+
+                if not signed_url:
+                    failed_downloads += 1
+                    continue
+
+                # Parse URL to get filename
+                parsed_url = urlparse(signed_url)
+                original_filename = (
+                    parsed_url.path.split("/")[-1]
+                    if parsed_url.path
+                    else f"{agency_id}.zip"
+                )
+
+                # Download file to data directory
+                local_file_path = os.path.join(self.data_dir, original_filename)
+                logger.info(f"Downloading {original_filename} for agency {agency_id} with name {agency_name}")
+
+                if self.download_file(signed_url, local_file_path):
+                    file_size = os.path.getsize(local_file_path)
+
+                    downloaded_file = DownloadedFile(
+                        agency_id=agency_id,
+                        agency_name=agency_name,
+                        original_filename=original_filename,
+                        local_path=local_file_path,
+                        file_size=file_size,
+                    )
+
+                    downloaded_files.append(downloaded_file)
+                    successful_downloads += 1
+                    logger.info(f"Successfully downloaded {original_filename}")
+
+                else:
+                    failed_downloads += 1
+                    logger.error(f"Failed to download {original_filename}")
+        except Exception as e:
+            logger.error(f"Error processing downloads: {e}")
+            return downloaded_files, successful_downloads, failed_downloads
+        return downloaded_files, successful_downloads, failed_downloads
diff --git a/src/dataset_file_hanlder/services/extraction_service.py b/src/dataset_file_hanlder/services/extraction_service.py
new file mode 100644
index 00000000..a81318de
--- /dev/null
+++ b/src/dataset_file_hanlder/services/extraction_service.py
@@ -0,0 +1,138 @@
+"""Service for file extraction operations."""
+
+import os
+import zipfile
+import shutil
+from typing import List, Dict
+
+from config.settings import settings
+from models.schemas import DownloadedFile
+import sys
+import logging
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s | %(levelname)s | %(message)s',
+    datefmt='%Y-%m-%d %H:%M:%S',
+    handlers=[logging.StreamHandler(sys.stdout)]
+)
+logger = logging.getLogger(__name__)
+
+
+class ExtractionService:
+    """Service class for handling file extraction operations."""
+
+    def __init__(self):
+        """Initialize the extraction service."""
+        self.data_dir = settings.DATA_DIR
+
+    def extract_zip_file(self, zip_path: str, extract_to: str) -> bool:
+        """
+        Extract a ZIP file to the specified directory.
+
+        Args:
+            zip_path: Path to the ZIP file
+            extract_to: Directory to extract files to
+
+        Returns:
+            True if extraction successful, False otherwise
+        """
+        try:
+            with zipfile.ZipFile(zip_path, "r") as zip_ref:
+                zip_ref.extractall(extract_to)
+            return True
+        except Exception as e:
+            logger.error(f"Failed to extract {zip_path}: {e}")
+            return False
+
+    def process_extractions(
+        self, downloaded_files: List[DownloadedFile], extract_files: bool = True
+    ) -> List[Dict[str, str]]:
+        """
+        Process extractions for downloaded files.
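+
+        Each returned entry matches the dict built below (illustrative):
+            {"agency_id": "42", "agency_name": "Some Agency", "folder_path": "<DATA_DIR>/Some Agency"}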
+ + Args: + downloaded_files: List of downloaded files to extract + extract_files: Whether to extract files + + Returns: + List of extracted folder information + """ + extracted_folders = [] + + if not extract_files: + return extracted_folders + + for downloaded_file in downloaded_files: + if not downloaded_file.original_filename.lower().endswith(".zip"): + continue + + # Extract agency name from filename + # agency_name = downloaded_file.original_filename.replace(".zip", "") + agency_name = downloaded_file.agency_name + agency_dir = os.path.join(self.data_dir, agency_name) + os.makedirs(agency_dir, exist_ok=True) + + # Extract to temporary directory first + temp_extract_dir = os.path.join(self.data_dir, f"temp_{agency_name}") + logger.info( + f"Extracting {downloaded_file.original_filename} to temporary directory: {temp_extract_dir}" + ) + + if self.extract_zip_file(downloaded_file.local_path, temp_extract_dir): + self._move_extracted_contents(temp_extract_dir, agency_dir) + + # Update downloaded file info + downloaded_file.extracted_path = agency_dir + downloaded_file.extraction_success = True + logger.info( + f"Successfully extracted {downloaded_file.original_filename}" + ) + + # Add to extracted folders list + extracted_folders.append( + {"agency_id": downloaded_file.agency_id, "agency_name": downloaded_file.agency_name, "folder_path": agency_dir} + ) + + # Remove the ZIP file after successful extraction + os.remove(downloaded_file.local_path) + logger.info(f"Removed ZIP file {downloaded_file.local_path}") + else: + downloaded_file.extraction_success = False + logger.error(f"Failed to extract {downloaded_file.original_filename}") + + return extracted_folders + + def _move_extracted_contents(self, temp_extract_dir: str, agency_dir: str) -> None: + """ + Move extracted contents from temporary directory to agency directory. + + Args: + temp_extract_dir: Temporary extraction directory + agency_dir: Target agency directory + """ + extracted_items = os.listdir(temp_extract_dir) + + if len(extracted_items) == 1 and os.path.isdir( + os.path.join(temp_extract_dir, extracted_items[0]) + ): + # Single folder was extracted - move its contents to the agency directory + nested_folder = os.path.join(temp_extract_dir, extracted_items[0]) + logger.info( + f"Found nested folder structure, moving contents from {nested_folder} to {agency_dir}" + ) + + for item in os.listdir(nested_folder): + src = os.path.join(nested_folder, item) + dst = os.path.join(agency_dir, item) + shutil.move(src, dst) + else: + # Multiple items or files extracted - move everything to agency directory + logger.info(f"Moving extracted contents directly to {agency_dir}") + for item in extracted_items: + src = os.path.join(temp_extract_dir, item) + dst = os.path.join(agency_dir, item) + shutil.move(src, dst) + + # Clean up temporary directory + shutil.rmtree(temp_extract_dir, ignore_errors=True) diff --git a/src/dataset_file_hanlder/services/s3_ferry_service.py b/src/dataset_file_hanlder/services/s3_ferry_service.py new file mode 100644 index 00000000..a7e3d2fc --- /dev/null +++ b/src/dataset_file_hanlder/services/s3_ferry_service.py @@ -0,0 +1,147 @@ +"""Service for S3Ferry file transfer operations.""" + +import requests +import logging +import traceback +from typing import Dict, Any + +# Configure logging +logger = logging.getLogger(__name__) + +class S3Ferry: + """Service class for handling S3Ferry file transfer operations.""" + + def __init__(self, base_url: str = "http://gc-s3-ferry:3000"): + """ + Initialize the S3Ferry service. 
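+
+        Note: the default base_url assumes the gc-s3-ferry service name from the
+        project's compose network; transfers are POSTed to {base_url}/v1/files/copy.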
+
+        Args:
+            base_url: Base URL for the S3Ferry service
+        """
+        self.base_url = base_url
+        self.url = f"{base_url}/v1/files/copy"
+        logger.info(f"S3Ferry service initialized with URL: {self.url}")
+
+    def transfer_file(self, destination_file_path: str, destination_storage_type: str,
+                      source_file_path: str, source_storage_type: str) -> requests.Response:
+        """
+        Transfer a file using S3Ferry service.
+
+        Args:
+            destination_file_path: Path where the file should be stored in destination
+            destination_storage_type: Type of destination storage (e.g., 'S3', 'FS')
+            source_file_path: Path of the source file
+            source_storage_type: Type of source storage (e.g., 'FS', 'S3')
+
+        Returns:
+            Response object from the S3Ferry service
+        """
+        try:
+            payload = self.get_s3_ferry_payload(
+                destination_file_path,
+                destination_storage_type,
+                source_file_path,
+                source_storage_type
+            )
+
+            logger.info(f"[S3_FERRY] Transferring file: {source_file_path} -> {destination_file_path}")
+            logger.debug(f"[S3_FERRY] Payload: {payload}")
+
+            response = requests.post(
+                self.url,
+                json=payload,
+                headers={"Content-Type": "application/json"},
+                timeout=60
+            )
+
+            logger.info(f"[S3_FERRY] Transfer response status: {response.status_code}")
+
+            # Accept both 200 (OK) and 201 (Created) as success
+            if response.status_code not in [200, 201]:
+                logger.error(f"[S3_FERRY] Transfer failed: {response.text}")
+            else:
+                logger.info(f"[S3_FERRY] ✅ Transfer successful (HTTP {response.status_code})")
+
+            return response
+
+        except Exception as e:
+            logger.error(f"[S3_FERRY] Error during file transfer: {str(e)}")
+            traceback.print_exc()
+            raise
+
+    def get_s3_ferry_payload(self, destination_file_path: str, destination_storage_type: str,
+                             source_file_path: str, source_storage_type: str) -> Dict[str, str]:
+        """
+        Generate S3Ferry payload for file transfer.
+
+        Args:
+            destination_file_path: Path where the file should be stored in destination
+            destination_storage_type: Type of destination storage
+            source_file_path: Path of the source file
+            source_storage_type: Type of source storage
+
+        Returns:
+            Dictionary containing the S3Ferry payload
+        """
+        payload = {
+            "destinationFilePath": destination_file_path,
+            "destinationStorageType": destination_storage_type,
+            "sourceFilePath": source_file_path,
+            "sourceStorageType": source_storage_type
+        }
+
+        return payload
+
+    def upload_to_s3(self, local_file_path: str, s3_destination_path: str) -> requests.Response:
+        """
+        Convenience method to upload a local file to S3.
+
+        Args:
+            local_file_path: Path to the local file
+            s3_destination_path: S3 destination path (e.g., 'bucket/folder/file.json')
+
+        Returns:
+            Response object from the S3Ferry service
+        """
+        return self.transfer_file(
+            destination_file_path=s3_destination_path,
+            destination_storage_type="S3",
+            source_file_path=local_file_path,
+            source_storage_type="FS"
+        )
+
+    def download_from_s3(self, s3_source_path: str, local_destination_path: str) -> requests.Response:
+        """
+        Convenience method to download a file from S3 to local storage.
+
+        Args:
+            s3_source_path: S3 source path (e.g., 'bucket/folder/file.json')
+            local_destination_path: Local destination path
+
+        Returns:
+            Response object from the S3Ferry service
+        """
+        return self.transfer_file(
+            destination_file_path=local_destination_path,
+            destination_storage_type="FS",
+            source_file_path=s3_source_path,
+            source_storage_type="S3"
+        )
+
+    # def copy_s3_to_s3(self, source_s3_path: str, destination_s3_path: str) -> requests.Response:
+    #     """
+    #     Convenience method to copy files between S3 locations.
+
+    #     Args:
+    #         source_s3_path: Source S3 path
+    #         destination_s3_path: Destination S3 path
+
+    #     Returns:
+    #         Response object from the S3Ferry service
+    #     """
+    #     return self.transfer_file(
+    #         destination_file_path=destination_s3_path,
+    #         destination_storage_type="s3",
+    #         source_file_path=source_s3_path,
+    #         source_storage_type="s3"
+    #     )
\ No newline at end of file
diff --git a/src/dataset_file_hanlder/services/url_decoder_service.py b/src/dataset_file_hanlder/services/url_decoder_service.py
new file mode 100644
index 00000000..901008b0
--- /dev/null
+++ b/src/dataset_file_hanlder/services/url_decoder_service.py
@@ -0,0 +1,38 @@
+"""Service for URL decoding operations."""
+
+import urllib.parse
+import json
+from typing import List, Dict, Any
+
+
+class URLDecoderService:
+    """Service class for handling URL decoding operations."""
+
+    @staticmethod
+    def decode_signed_urls(encoded_data: str) -> List[Dict[str, Any]]:
+        """
+        Decode URL-encoded signed URLs data.
+
+        Args:
+            encoded_data: URL-encoded JSON string containing signed URLs
+
+        Returns:
+            List of decoded URL data dictionaries
+
+        Raises:
+            ValueError: If decoding or JSON parsing fails
+        """
+        try:
+            # URL decode the data
+            decoded_data = urllib.parse.unquote(encoded_data)
+
+            # Parse JSON
+            parsed_data = json.loads(decoded_data)
+
+            return parsed_data
+
+        except json.JSONDecodeError as e:
+            raise ValueError(f"Failed to parse JSON: {e}")
+
+        except Exception as e:
+            raise ValueError(f"Failed to decode data: {e}")
diff --git a/src/dataset_file_hanlder/single_chunk_handler.py b/src/dataset_file_hanlder/single_chunk_handler.py
new file mode 100644
index 00000000..5760e8d7
--- /dev/null
+++ b/src/dataset_file_hanlder/single_chunk_handler.py
@@ -0,0 +1,118 @@
+"""Service for handling single chunk downloads from S3."""
+
+import json
+import os
+import tempfile
+import traceback
+from typing import Dict, Any
+from loguru import logger
+
+from services.s3_ferry_service import S3Ferry
+
+
+class ChunkService:
+    """Service class for handling single chunk operations."""
+
+    def __init__(self):
+        """Initialize the chunk service."""
+        self.s3_ferry_service = S3Ferry()
+        logger.info("ChunkService initialized")
+
+    def download_chunk_from_s3(self, dataset_id: str, page_num: int) -> Dict[str, Any]:
+        """
+        Download a specific chunk from S3 bucket.
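+
+        On success the result is shaped like (illustrative):
+            {"success": True, "dataset_id": "3", "page_num": 1, "chunk_data": {...},
+             "message": "Successfully downloaded chunk 1 for dataset 3"}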
+ + Args: + dataset_id: Dataset ID + page_num: Page number (chunk number) + + Returns: + Dictionary containing chunk data or error information + """ + try: + logger.info(f"Starting chunk download - Dataset ID: {dataset_id}, Page: {page_num}") + + # Create temporary directory for download + temp_dir = tempfile.mkdtemp(prefix="chunk_download_") + logger.info(f"Created temporary directory: {temp_dir}") + + # Define S3 source path and local destination + chunk_filename = f"{page_num}.json" + s3_source_path = f"{dataset_id}/{chunk_filename}" + local_dest_path = f"temp_chunks/{chunk_filename}" + + # Create the temp_chunks directory if it doesn't exist + temp_chunks_dir = "temp_chunks" + os.makedirs(temp_chunks_dir, exist_ok=True) + logger.info(f"Created/verified temp directory: {temp_chunks_dir}") + + logger.info(f"S3 source path: {s3_source_path}") + logger.info(f"Local destination: {local_dest_path}") + + # Download chunk from S3 using S3Ferry service + response = self.s3_ferry_service.transfer_file( + destination_file_path=local_dest_path, + destination_storage_type="FS", + source_file_path=s3_source_path, + source_storage_type="S3" + ) + + logger.info(f"S3Ferry response status: {response.status_code}") + logger.info(f"S3Ferry response body: {response.text}") + + if response.status_code in [200, 201]: + # Read the downloaded chunk file + local_file_path = f"/app/{local_dest_path}" + + if os.path.exists(local_file_path): + logger.info(f"Successfully downloaded chunk to: {local_file_path}") + + # Read and parse the chunk data + with open(local_file_path, 'r', encoding='utf-8') as f: + chunk_data = json.load(f) + + # Clean up the downloaded file + os.remove(local_file_path) + logger.info(f"Cleaned up downloaded file: {local_file_path}") + + # Remove empty directory if it exists + try: + os.rmdir(os.path.dirname(local_file_path)) + except OSError: + pass # Directory not empty or doesn't exist + + return { + "success": True, + "dataset_id": dataset_id, + "page_num": page_num, + "chunk_data": chunk_data, + "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}" + } + else: + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": f"Downloaded file not found at: {local_file_path}", + "message": "File download completed but file not accessible" + } + else: + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": f"S3 download failed: HTTP {response.status_code}", + "response_body": response.text, + "message": f"Failed to download chunk {page_num} from S3" + } + + except Exception as e: + logger.error(f"Error during chunk download: {str(e)}") + traceback.print_exc() + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": str(e), + "message": "Internal error during chunk download" + } \ No newline at end of file diff --git a/src/s3_dataset_processor/dataset_generation_callback_processor.py b/src/s3_dataset_processor/dataset_generation_callback_processor.py new file mode 100644 index 00000000..b73ea390 --- /dev/null +++ b/src/s3_dataset_processor/dataset_generation_callback_processor.py @@ -0,0 +1,571 @@ +#!/usr/bin/env python3 +""" +Standalone script for processing dataset generation callbacks. +Replaces the FastAPI background task with direct synchronous execution. 
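+
+Example invocation (illustrative):
+    python dataset_generation_callback_processor.py --file-path output_datasets/12.json --encoded-results "<url-encoded JSON>"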
+""" +import sys +import json +import argparse +import logging +import re +import requests +import traceback +import os +from pathlib import Path + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stdout)] +) +logger = logging.getLogger(__name__) + +# Add the s3_dataset_processor to Python path to import modules +script_dir = Path('/app/src/s3_dataset_processor') +sys.path.insert(0, str(script_dir)) + +logger.info(f"🔍 Script directory: {script_dir}") +logger.info(f"🔍 Script directory exists: {script_dir.exists()}") +logger.info(f"🔍 Python path: {sys.path}") + +try: + from services.url_decoder_service import URLDecoderService + from services.s3_ferry_service import S3Ferry + url_decoder_service = URLDecoderService() + s3_ferry_service = S3Ferry() + logger.info("✅ Successfully imported URLDecoderService") +except ImportError as e: + logger.error(f"❌ Failed to import URLDecoderService: {e}") + logger.error(f"Python path: {sys.path}") + logger.error(f"Script directory exists: {script_dir.exists()}") + if script_dir.exists(): + logger.error(f"Contents of script directory: {list(script_dir.iterdir())}") + # Try to check services directory + services_dir = script_dir / 'services' + if services_dir.exists(): + logger.error(f"Contents of services directory: {list(services_dir.iterdir())}") + traceback.print_exc() + sys.exit(1) + +def chunk_dataset(dataset_path: str, chunk_size: int = 5): + """ + Chunk the generated dataset into smaller files with specified number of records. + + Args: + dataset_path: Path to the generated dataset JSON file + chunk_size: Number of records per chunk (default: 5) + + Returns: + List of chunk file paths created + """ + try: + logger.info(f"[CHUNKING] Starting chunking for: {dataset_path}") + + # Read the original dataset + with open(dataset_path, 'r', encoding='utf-8') as f: + dataset = json.load(f) + + aggregated_data = dataset.get('aggregated_data', []) + total_items = len(aggregated_data) + + logger.info(f"[CHUNKING] Total items to chunk: {total_items}") + logger.info(f"[CHUNKING] Chunk size: {chunk_size}") + + if total_items == 0: + logger.warning("[CHUNKING] No data to chunk") + return [] + + # Create chunks directory + dataset_name = Path(dataset_path).stem + chunks_dir = Path(dataset_path).parent / f"{dataset_name}_chunks" + chunks_dir.mkdir(exist_ok=True) + + chunk_files = [] + + # Create chunks with incremental naming (1.json, 2.json, etc.) + for i in range(0, total_items, chunk_size): + chunk_data = aggregated_data[i:i + chunk_size] + chunk_number = (i // chunk_size) + 1 + + # Use simple incremental naming: 1.json, 2.json, 3.json, etc. 
+ chunk_filename = f"{chunk_number}.json" + chunk_path = chunks_dir / chunk_filename + + # Create chunk with metadata + chunk_content = { + "chunk_info": { + "original_dataset": dataset_name, + "chunk_number": chunk_number, + "total_chunks": (total_items + chunk_size - 1) // chunk_size, + "items_in_chunk": len(chunk_data), + "chunk_range": f"{i + 1}-{min(i + chunk_size, total_items)}" + }, + "data": chunk_data + } + + # Write chunk file + with open(chunk_path, 'w', encoding='utf-8') as f: + json.dump(chunk_content, f, indent=2, ensure_ascii=False) + + chunk_files.append(str(chunk_path)) + logger.info(f"[CHUNKING] Created chunk {chunk_number}: {chunk_filename} ({len(chunk_data)} items)") + + logger.info(f"[CHUNKING] ✅ Created {len(chunk_files)} chunk files in: {chunks_dir}") + return chunk_files + + except Exception as e: + logger.error(f"[CHUNKING] ❌ Error during chunking: {str(e)}") + traceback.print_exc() + raise + +def upload_chunks_to_s3(chunk_files: list, dataset_id: str): + """ + Upload chunk files to S3 using S3Ferry service. + + Args: + chunk_files: List of chunk file paths to upload + dataset_id: Dataset ID for organizing uploads + + Returns: + List of upload results with S3 URLs + """ + try: + logger.info(f"[S3_UPLOAD] Starting S3 upload for {len(chunk_files)} chunks using S3Ferry service") + + upload_results = [] + + for chunk_file in chunk_files: + # Extract just the filename (e.g., "1.json", "2.json") + chunk_filename = Path(chunk_file).name + + logger.info(f"[S3_UPLOAD] Processing chunk file: {chunk_file}") + logger.info(f"[S3_UPLOAD] Chunk filename: {chunk_filename}") + + # Create the exact payload format you specified + # destinationFilePath: "/{dataset_id}/{filename}" (e.g., "/3/2.json") + destination_file_path = f"/{dataset_id}/{chunk_filename}" + + # sourceFilePath: relative path from gc-s3-ferry's volume mount (e.g., "output_datasets/3_chunks/2.json") + source_file_path = f"output_datasets/{dataset_id}_chunks/{chunk_filename}" + + logger.info(f"[S3_UPLOAD] Destination path: {destination_file_path}") + logger.info(f"[S3_UPLOAD] Source path: {source_file_path}") + + try: + # Use S3Ferry service with exact payload format + response = s3_ferry_service.transfer_file( + destination_file_path=destination_file_path, # "/3/2.json" + destination_storage_type="S3", # "S3" + source_file_path=source_file_path, # "output_datasets/3_chunks/2.json" + source_storage_type="FS" # "FS" + ) + + logger.info(f"[S3_UPLOAD] S3Ferry response status: {response.status_code}") + logger.info(f"[S3_UPLOAD] S3Ferry response body: {response.text}") + + # Accept both 200 (OK) and 201 (Created) as success + if response.status_code in [200, 201]: + # Parse response if needed + response_data = {} + try: + response_data = response.json() if response.text else {} + except Exception: + pass + + + upload_results.append({ + "chunk_file": chunk_filename, + "destination_path": destination_file_path, + "source_path": source_file_path, + "success": True, + "response": response_data, + "status_code": response.status_code + }) + logger.info(f"[S3_UPLOAD] ✅ Uploaded: {chunk_filename} -> (HTTP {response.status_code})") + else: + upload_results.append({ + "chunk_file": chunk_filename, + "error": f"HTTP {response.status_code}: {response.text}", + "success": False, + "source_path": source_file_path, + "status_code": response.status_code + }) + logger.error(f"[S3_UPLOAD] ❌ Failed to upload {chunk_filename}: HTTP {response.status_code}") + logger.error(f"[S3_UPLOAD] Response: {response.text}") + + except 
requests.exceptions.RequestException as e:
+                upload_results.append({
+                    "chunk_file": chunk_filename,
+                    "error": str(e),
+                    "success": False,
+                    "source_path": source_file_path
+                })
+                logger.error(f"[S3_UPLOAD] ❌ Request failed for {chunk_filename}: {str(e)}")
+                traceback.print_exc()
+
+            except Exception as e:
+                upload_results.append({
+                    "chunk_file": chunk_filename,
+                    "error": str(e),
+                    "success": False,
+                    "source_path": source_file_path
+                })
+                logger.error(f"[S3_UPLOAD] ❌ Unexpected error for {chunk_filename}: {str(e)}")
+                traceback.print_exc()
+
+        successful_uploads = [r for r in upload_results if r.get('success', False)]
+        failed_uploads = [r for r in upload_results if not r.get('success', False)]
+
+        logger.info(f"[S3_UPLOAD] ✅ Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed")
+
+        # Log detailed results
+        if successful_uploads:
+            logger.info("[S3_UPLOAD] 📊 Successful uploads:")
+            for result in successful_uploads:
+                status_code = result.get('status_code', 'unknown')
+                logger.info(f"[S3_UPLOAD] - {result['chunk_file']}: {result['destination_path']} (HTTP {status_code})")
+
+        if failed_uploads:
+            logger.warning("[S3_UPLOAD] ⚠️ Failed uploads:")
+            for result in failed_uploads:
+                logger.warning(f"[S3_UPLOAD] - {result['chunk_file']}: {result['error']}")
+
+        return upload_results
+
+    except Exception as e:
+        logger.error(f"[S3_UPLOAD] ❌ Error during S3 upload: {str(e)}")
+        traceback.print_exc()
+        raise
+
+def update_chunk_metadata(chunk_file_path: str, dataset_id: str, chunk_number: int):
+    """
+    Update chunk metadata in the database after successful S3 upload.
+
+    Args:
+        chunk_file_path: Path to the chunk file to extract agency information
+        dataset_id: Dataset ID
+        chunk_number: Chunk number (1, 2, 3, etc.)
+
+    Returns:
+        Response from the metadata update endpoint
+    """
+    try:
+        logger.info(f"[CHUNK_METADATA] Updating metadata for chunk {chunk_number} of dataset {dataset_id}")
+
+        # Read the chunk file to extract agency information
+        with open(chunk_file_path, 'r', encoding='utf-8') as f:
+            chunk_data = json.load(f)
+
+        # Extract agency IDs from the chunk data
+        chunk_items = chunk_data.get('data', [])
+        agency_ids = []
+
+        for item in chunk_items:
+            agency_id = item.get('agency_id', 'unknown')
+            agency_ids.append(agency_id)
+
+        logger.info(f"[CHUNK_METADATA] Extracted {len(agency_ids)} agency IDs: {agency_ids}")
+
+        # Create the payload for the metadata endpoint
+        metadata_payload = {
+            "datasetId": int(dataset_id),
+            "chunkId": chunk_number,
+            "includedAgencies": json.dumps(agency_ids)  # Convert array to JSON string
+        }
+
+        logger.info(f"[CHUNK_METADATA] Payload: {json.dumps(metadata_payload, indent=2)}")
+
+        # Send POST request to the chunk metadata endpoint
+        CHUNK_METADATA_URL = "http://resql:8082/global-classifier/update-data-chunk-metadata"
+
+        response = requests.post(
+            CHUNK_METADATA_URL,
+            json=metadata_payload,
+            headers={"Content-Type": "application/json"},
+            timeout=30
+        )
+
+        logger.info(f"[CHUNK_METADATA] Response status: {response.status_code}")
+        logger.info(f"[CHUNK_METADATA] Response body: {response.text}")
+
+        if response.status_code == 200:
+            logger.info(f"[CHUNK_METADATA] ✅ Successfully updated metadata for chunk {chunk_number}")
+        else:
+            logger.warning(f"[CHUNK_METADATA] ⚠️ Metadata update failed for chunk {chunk_number}: HTTP {response.status_code}")
+
+        return response
+
+    except Exception as e:
+        logger.error(f"[CHUNK_METADATA] ❌ Error updating chunk metadata: {str(e)}")
+        traceback.print_exc()
+        raise
+
+def process_callback_background(file_path: str, encoded_results: str):
+    """
+    Process the dataset generation callback with chunking and S3 upload.
+    This is the same function from s3_processor_api.py but now runs synchronously.
+    """
+    try:
+        logger.info(f"[CALLBACK] Starting processing for: {file_path}")
+
+        # Extract dataset ID from file path (e.g., output_datasets/single_question/12.json -> 12)
+        dataset_id_match = re.search(r"/([^/]+)\.json$", file_path)
+        dataset_id = dataset_id_match.group(1) if dataset_id_match else "unknown"
+
+        logger.info(f"[CALLBACK] Extracted dataset ID: {dataset_id}")
+
+        # Step 1: Decode the results using the existing service
+        decoded_results = url_decoder_service.decode_signed_urls(encoded_results)
+        logger.info(f"[CALLBACK] Decoded {len(decoded_results)} results")
+
+        # Step 2: Chunk the generated dataset
+        full_dataset_path = f"/app/output_datasets/{dataset_id}.json"
+        if os.path.exists(full_dataset_path):
+            logger.info(f"[CALLBACK] Found dataset file: {full_dataset_path}")
+            # Step 2.1: Upload the original aggregated dataset file
+            logger.info("[CALLBACK] 📤 Starting upload of original aggregated dataset...")
+            try:
+                # Upload the original dataset file to S3
+                original_dataset_response = s3_ferry_service.transfer_file(
+                    destination_file_path=f"/{dataset_id}/aggregated_dataset.json",  # "/3/aggregated_dataset.json"
+                    destination_storage_type="S3",
+                    source_file_path=f"output_datasets/{dataset_id}.json",  # "output_datasets/3.json"
+                    source_storage_type="FS"
+                )
+
+                logger.info(f"[CALLBACK] Original dataset upload status: {original_dataset_response.status_code}")
+                logger.info(f"[CALLBACK] Original dataset upload response: {original_dataset_response.text}")
+
+                if original_dataset_response.status_code in [200, 201]:
+                    original_s3_url = f"s3://global-classifier/{dataset_id}/aggregated_dataset.json"
+                    logger.info(f"[CALLBACK] ✅ Original dataset uploaded: {original_s3_url}")
+
+                else:
+                    logger.error(f"[CALLBACK] ❌ Failed to upload original dataset: HTTP {original_dataset_response.status_code}")
+
+            except Exception as e:
+                logger.error(f"[CALLBACK] ❌ Error uploading original dataset: {str(e)}")
+                traceback.print_exc()
+
+            # Step 2.2: Chunk the generated dataset
+            chunk_files = chunk_dataset(full_dataset_path, chunk_size=5)
+
+            # Step 3: Upload chunks to S3
+            if chunk_files:
+                upload_results = upload_chunks_to_s3(chunk_files, dataset_id)
+
+                # Log upload summary
+                successful_uploads = [r for r in upload_results if r.get('success', False)]
+                logger.info("[CALLBACK] 📊 S3 Upload Summary:")
+                logger.info(f"[CALLBACK] - Total chunks: {len(chunk_files)}")
+                logger.info(f"[CALLBACK] - Successful uploads: {len(successful_uploads)}")
+                logger.info(f"[CALLBACK] - Failed uploads: {len(upload_results) - len(successful_uploads)}")
+
+                # Log uploaded chunk destinations
+                for result in successful_uploads:
+                    logger.info(f"[CALLBACK] - {result['chunk_file']}: {result['destination_path']}")
+
+                # Step 3.5: Update chunk metadata for successfully uploaded chunks
+                logger.info("[CALLBACK] 🔄 Starting chunk metadata updates...")
+                metadata_results = []
+                for i, chunk_file in enumerate(chunk_files):
+                    chunk_number = i + 1  # Chunks are numbered 1, 2, 3, etc.
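+                    # e.g., chunk_files[0] -> chunk 1 -> "1.json"; the filename is
+                    # matched against upload_results so chunks whose upload failed are skipped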
+ + # Only update metadata for successfully uploaded chunks + chunk_filename = Path(chunk_file).name + upload_success = any( + result['chunk_file'] == chunk_filename and result['success'] + for result in upload_results + ) + + if upload_success: + try: + metadata_response = update_chunk_metadata( + chunk_file_path=chunk_file, + dataset_id=dataset_id, + chunk_number=chunk_number + ) + + metadata_results.append({ + "chunk_number": chunk_number, + "chunk_file": chunk_filename, + "success": metadata_response.status_code == 200, + "response": metadata_response.text + }) + + except Exception as e: + logger.error(f"[CALLBACK] ❌ Failed to update metadata for chunk {chunk_number}: {str(e)}") + metadata_results.append({ + "chunk_number": chunk_number, + "chunk_file": chunk_filename, + "success": False, + "error": str(e) + }) + else: + logger.warning(f"[CALLBACK] ⚠️ Skipping metadata update for chunk {chunk_number} (upload failed)") + metadata_results.append({ + "chunk_number": chunk_number, + "chunk_file": chunk_filename, + "success": False, + "error": "Upload failed" + }) + + else: + logger.warning("[CALLBACK] No chunks created, skipping S3 upload") + else: + logger.warning(f"[CALLBACK] Dataset file not found: {full_dataset_path}") + + # Step 4: Process the decoded results to create the required payload + agencies = [] + overall_success = True + + for i, result in enumerate(decoded_results): + # Extract agency_id from dataset_metadata + dataset_metadata = result.get("dataset_metadata", {}) + agency_id = dataset_metadata.get("agency_id", "unknown") + success = result.get("success", False) + + sync_status = "Synced_with_CKB" if success else "Sync_with_CKB_Failed" + + agencies.append({"agencyId": agency_id, "syncStatus": sync_status}) + + logger.info( + f"[CALLBACK] Agency {i + 1}: ID={agency_id}, Success={success}, Status={sync_status}" + ) + + if not success: + overall_success = False + + generation_status = ( + "Generation_Success" if overall_success else "Generation_Failed" + ) + + # Create the exact payload format requested + callback_payload = { + "agencies": agencies, + "datasetId": dataset_id, + "generationStatus": generation_status, + } + + # Log the processed callback + logger.info(f"[CALLBACK] {json.dumps(callback_payload, indent=2)}") + logger.info(f"[CALLBACK] Dataset ID: {dataset_id}") + logger.info(f"[CALLBACK] Generation Status: {generation_status}") + logger.info(f"[CALLBACK] Total Agencies: {len(agencies)}") + logger.info( + f"[CALLBACK] Successful Agencies: {len([a for a in agencies if a['syncStatus'] == 'Synced_with_CKB'])}" + ) + logger.info( + f"[CALLBACK] Failed Agencies: {len([a for a in agencies if a['syncStatus'] == 'Sync_with_CKB_Failed'])}" + ) + + # Step 5: Send status update to Ruuter + STATUS_UPDATE_URL = ( + "http://ruuter-public:8086/global-classifier/agencies/data/generation" + ) + + logger.info(f"[CALLBACK] Sending callback payload to: {STATUS_UPDATE_URL}") + + try: + # Send POST request to the status update endpoint + response = requests.post( + STATUS_UPDATE_URL, + json=callback_payload, + headers={"Content-Type": "application/json"}, + timeout=30, + ) + + logger.info( + f"[CALLBACK] Status update response - HTTP Status: {response.status_code}" + ) + logger.info(f"[CALLBACK] Status update response body: {response.text}") + + if response.status_code == 200: + logger.info( + "[CALLBACK] ✅ Successfully sent callback payload to status update endpoint" + ) + else: + logger.warning( + f"[CALLBACK] ⚠️ Status update endpoint returned non-200 status: {response.status_code}" 
+ ) + logger.info(f"[CALLBACK] Response: {response.text}") + + except requests.exceptions.RequestException as webhook_error: + logger.error( + f"[CALLBACK] ❌ Error sending callback to status update endpoint: {str(webhook_error)}" + ) + logger.debug(f"[CALLBACK] URL: {STATUS_UPDATE_URL}") + logger.debug( + f"[CALLBACK] Payload: {json.dumps(callback_payload, indent=2)}" + ) + traceback.print_exc() + + except Exception as unexpected_error: + logger.error( + f"[CALLBACK] ❌ Unexpected error during status update: {str(unexpected_error)}" + ) + traceback.print_exc() + + logger.info("[CALLBACK] Processing completed successfully") + + except Exception as e: + logger.error(f"[CALLBACK] Error in processing: {str(e)}") + traceback.print_exc() + raise + +def main(): + """Main function to handle callback processing.""" + parser = argparse.ArgumentParser(description='Process dataset generation callback') + parser.add_argument('--file-path', required=True, help='File path of the generated dataset') + parser.add_argument('--encoded-results', required=True, help='Encoded results string') + parser.add_argument('--output-json', help='Output JSON file path for response') + + args = parser.parse_args() + + try: + logger.info("🔄 Starting callback processing...") + logger.info(f"File path: {args.file_path}") + logger.info(f"Encoded results length: {len(args.encoded_results)} characters") + + # Process the callback directly (synchronous execution) + process_callback_background(args.file_path, args.encoded_results) + + # Create response + response = { + "message": "Callback processing completed successfully", + "status": "completed", + "file_path": args.file_path + } + + # Output response to file if specified + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(response, f, indent=2) + logger.info(f"✅ Response written to: {args.output_json}") + + # Also output to stdout for shell script + print(json.dumps(response)) + + logger.info("✅ Callback processing completed successfully") + + except Exception as e: + logger.error(f"❌ Error processing callback: {str(e)}") + traceback.print_exc() + error_response = { + "message": f"Callback processing failed: {str(e)}", + "status": "error", + "file_path": args.file_path + } + + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(error_response, f, indent=2) + + print(json.dumps(error_response)) + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/s3_dataset_processor/dataset_generation_callback_processor_v1.py b/src/s3_dataset_processor/dataset_generation_callback_processor_v1.py new file mode 100644 index 00000000..07fa52fb --- /dev/null +++ b/src/s3_dataset_processor/dataset_generation_callback_processor_v1.py @@ -0,0 +1,201 @@ +""" +Standalone script for processing dataset generation callbacks. +Replaces the FastAPI background task with direct synchronous execution. 
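+
+Example invocation (illustrative):
+    python dataset_generation_callback_processor_v1.py --file-path output_datasets/12.json --encoded-results "<url-encoded JSON>"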
+""" +import sys +import json +import argparse +import logging +import re +import requests +from pathlib import Path +import traceback + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stdout)] +) +logger = logging.getLogger(__name__) + +# Add the s3_dataset_processor to Python path to import modules +script_dir = Path('/app/src/s3_dataset_processor') +sys.path.insert(0, str(script_dir)) + +try: + from services.url_decoder_service import URLDecoderService + url_decoder_service = URLDecoderService() + logger.info("✅ Successfully imported URLDecoderService") +except ImportError as e: + logger.error(f"❌ Failed to import URLDecoderService: {e}") + traceback.print_exc() + sys.exit(1) + +def process_callback_background(file_path: str, encoded_results: str): + """ + Process the dataset generation callback. + This is the same function from s3_processor_api.py but now runs synchronously. + """ + try: + print(f"[CALLBACK] Starting processing for: {file_path}") + + # Extract dataset ID from file path (e.g., output_datasets/single_question/12.json -> 12) + dataset_id_match = re.search(r"/([^/]+)\.json$", file_path) + dataset_id = dataset_id_match.group(1) if dataset_id_match else "unknown" + + logger.info(f"[CALLBACK] Extracted dataset ID: {dataset_id}") + + # Decode the results using the existing service + decoded_results = url_decoder_service.decode_signed_urls(encoded_results) + + logger.info(f"[CALLBACK] Decoded {len(decoded_results)} results") + + # Process the decoded results to create the required payload + agencies = [] + overall_success = True + + for i, result in enumerate(decoded_results): + # Extract agency_id from dataset_metadata + dataset_metadata = result.get("dataset_metadata", {}) + agency_id = dataset_metadata.get("agency_id", "unknown") + success = result.get("success", False) + + sync_status = "Synced_with_CKB" if success else "Sync_with_CKB_Failed" + + agencies.append({"agencyId": agency_id, "syncStatus": sync_status}) + + logger.info( + f"[CALLBACK] Agency {i + 1}: ID={agency_id}, Success={success}, Status={sync_status}" + ) + + if not success: + overall_success = False + + generation_status = ( + "Generation_Success" if overall_success else "Generation_Failed" + ) + + # Create the exact payload format requested + callback_payload = { + "agencies": agencies, + "datasetId": dataset_id, + "generationStatus": generation_status, + } + + # Log the processed callback + logger.info(f"[CALLBACK] {json.dumps(callback_payload, indent=2)}") + logger.info(f"[CALLBACK] Dataset ID: {dataset_id}") + logger.info(f"[CALLBACK] Generation Status: {generation_status}") + logger.info(f"[CALLBACK] Total Agencies: {len(agencies)}") + logger.info( + f"[CALLBACK] Successful Agencies: {len([a for a in agencies if a['syncStatus'] == 'Synced_with_CKB'])}" + ) + logger.info( + f"[CALLBACK] Failed Agencies: {len([a for a in agencies if a['syncStatus'] == 'Sync_with_CKB_Failed'])}" + ) + + STATUS_UPDATE_URL = ( + "http://ruuter-public:8086/global-classifier/agencies/data/generation" + ) + + logger.info(f"[CALLBACK] Sending callback payload to: {STATUS_UPDATE_URL}") + + try: + # Send POST request to the status update endpoint + response = requests.post( + STATUS_UPDATE_URL, + json=callback_payload, + headers={"Content-Type": "application/json"}, + timeout=30, + ) + + logger.info( + f"[CALLBACK] Status update response - HTTP Status: {response.status_code}" + ) + 
logger.info(f"[CALLBACK] Status update response body: {response.text}") + + if response.status_code == 200: + logger.info( + "[CALLBACK] ✅ Successfully sent callback payload to status update endpoint" + ) + else: + logger.warning( + f"[CALLBACK] ⚠️ Status update endpoint returned non-200 status: {response.status_code}" + ) + logger.info(f"[CALLBACK] Response: {response.text}") + + except requests.exceptions.RequestException as webhook_error: + logger.error( + f"[CALLBACK] ❌ Error sending callback to status update endpoint: {str(webhook_error)}" + ) + logger.debug(f"[CALLBACK] URL: {STATUS_UPDATE_URL}") + logger.debug( + f"[CALLBACK] Payload: {json.dumps(callback_payload, indent=2)}" + ) + + except Exception as unexpected_error: + logger.error( + f"[CALLBACK] ❌ Unexpected error during status update: {str(unexpected_error)}" + ) + + logger.info("[CALLBACK] Processing completed successfully") + + except Exception as e: + logger.error(f"[CALLBACK] Error in processing: {str(e)}") + raise + +def main(): + """Main function to handle callback processing.""" + parser = argparse.ArgumentParser(description='Process dataset generation callback') + parser.add_argument('--file-path', required=True, help='File path of the generated dataset') + parser.add_argument('--encoded-results', required=True, help='Encoded results string') + parser.add_argument('--output-json', help='Output JSON file path for response') + + args = parser.parse_args() + + try: + logger.info("🔄 Starting callback processing...") + logger.info(f"File path: {args.file_path}") + logger.info(f"Encoded results length: {len(args.encoded_results)} characters") + + # Process the callback directly (synchronous execution) + process_callback_background(args.file_path, args.encoded_results) + + # Create response + response = { + "message": "Callback processing completed successfully", + "status": "completed", + "file_path": args.file_path + } + + # Output response to file if specified + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(response, f, indent=2) + logger.info(f"✅ Response written to: {args.output_json}") + + # Also output to stdout for shell script + print(json.dumps(response)) + + logger.info("✅ Callback processing completed successfully") + + except Exception as e: + logger.error(f"❌ Error processing callback: {str(e)}") + error_response = { + "message": f"Callback processing failed: {str(e)}", + "status": "error", + "file_path": args.file_path + } + + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(error_response, f, indent=2) + + print(json.dumps(error_response)) + traceback.print_exc() + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/s3_dataset_processor/download_source_dataset.py b/src/s3_dataset_processor/download_source_dataset.py new file mode 100644 index 00000000..eeab901c --- /dev/null +++ b/src/s3_dataset_processor/download_source_dataset.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 +# filepath: c:\Users\charith.bimsara_root\Rootcode\Estonian-Gov-AI\New\Global-Classifier\src\s3_dataset_processor\download_source_dataset.py +""" +Direct Python script for downloading datasets from S3 signed URLs. +Replaces the FastAPI /download-datasets endpoint for CronManager execution. 
+""" +import sys +import json +import argparse +import logging +from pathlib import Path +import traceback + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stdout)] +) +logger = logging.getLogger(__name__) + +# Add the s3_dataset_processor to Python path to import modules FIRST +# This path corresponds to the volume mount in docker-compose.yml +script_dir = Path('/app/src/s3_dataset_processor') +sys.path.insert(0, str(script_dir)) + +# Now import the services AFTER adding to path +try: + from services.url_decoder_service import URLDecoderService + from services.download_service import DownloadService + from services.extraction_service import ExtractionService + from handlers.response_handler import ResponseHandler + logger.info("✅ Successfully imported all required modules") +except ImportError as e: + logger.error(f"❌ Failed to import required modules: {e}") + logger.error(f"Python path: {sys.path}") + logger.error(f"Script directory exists: {script_dir.exists()}") + if script_dir.exists(): + logger.error(f"Contents of script directory: {list(script_dir.iterdir())}") + sys.exit(1) + +def main(): + """Main function to handle dataset download process.""" + parser = argparse.ArgumentParser(description='Download datasets from S3 signed URLs') + parser.add_argument('--encoded-data', required=True, help='Base64 encoded signed URLs data') + parser.add_argument('--extract-files', action='store_true', default=True, help='Extract downloaded files') + parser.add_argument('--output-json', help='Output file path for results JSON') + + args = parser.parse_args() + + try: + if not args.encoded_data or not isinstance(args.encoded_data, str): + logger.error("'encoded_data' must be a non-empty string") + sys.exit(1) + + logger.info("Initializing services...") + # Initialize services + url_decoder_service = URLDecoderService() + download_service = DownloadService() + extraction_service = ExtractionService() + response_handler = ResponseHandler() + + logger.info("Decoding signed URLs...") + # Decode the data using service + decoded_data = url_decoder_service.decode_signed_urls(args.encoded_data) + logger.info(f"Starting download for {len(decoded_data)} files") + + logger.info("Processing downloads...") + # Process downloads using service + downloaded_files, successful_downloads, failed_downloads = ( + download_service.process_downloads(decoded_data) + ) + + logger.info("Processing extractions...") + # Process extractions using service + extracted_folders = extraction_service.process_extractions( + downloaded_files, args.extract_files + ) + + logger.info("Formatting response...") + # Format response using handler + response = response_handler.format_download_response( + decoded_data, + downloaded_files, + successful_downloads, + failed_downloads, + extracted_folders, + ) + + # Output results + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(response.dict(), f, indent=2) + logger.info(f"Results written to {args.output_json}") + else: + print(json.dumps(response.dict(), indent=2)) + + # Log summary + logger.info(f"Download completed: {successful_downloads} successful, {failed_downloads} failed") + logger.info(f"Extracted folders: {len(extracted_folders)}") + + # Exit with appropriate code + sys.exit(0 if response.success else 1) + + except ValueError as e: + logger.error(f"Decoding error: {str(e)}") + traceback.print_exc() + sys.exit(1) + except 
Exception as e: + logger.error(f"Internal error: {str(e)}") + traceback.print_exc() + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/s3_dataset_processor/fetch_chunk_without_filter.py b/src/s3_dataset_processor/fetch_chunk_without_filter.py new file mode 100644 index 00000000..2aa5e54d --- /dev/null +++ b/src/s3_dataset_processor/fetch_chunk_without_filter.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 +""" +Python script to download a specific chunk from S3 bucket and return as JSON. +Used by CronManager endpoint to fetch individual chunks. +""" +import sys +import json +import argparse +import logging +import os +import tempfile +from pathlib import Path +import traceback + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stderr)] # Log to stderr to keep stdout clean +) +logger = logging.getLogger(__name__) + +# Add the s3_dataset_processor to Python path to import modules +script_dir = Path('/app/src/s3_dataset_processor') +sys.path.insert(0, str(script_dir)) + +def log(message): + """Log to stderr to keep stdout clean for JSON output""" + logger.info(f"🔍 [CHUNK DOWNLOAD] {message}") + +try: + from services.s3_ferry_service import S3Ferry + s3_ferry_service = S3Ferry() + log("Successfully imported S3FerryService") +except ImportError as e: + log(f"Failed to import S3FerryService: {e}") + sys.exit(1) + +def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: + """ + Download a specific chunk from S3 bucket. + + Args: + dataset_id: Dataset ID + page_num: Page number (chunk number) + + Returns: + Dictionary containing chunk data or error information + """ + try: + log(f"Starting chunk download - Dataset ID: {dataset_id}, Page: {page_num}") + + # Create temporary directory for download + temp_dir = tempfile.mkdtemp(prefix="chunk_download_") + log(f"Created temporary directory: {temp_dir}") + + # Define S3 source path and local destination + chunk_filename = f"{page_num}.json" + s3_source_path = f"{dataset_id}/{chunk_filename}" + local_dest_path = f"temp_chunks/{chunk_filename}" + + # Create the temp_chunks directory if it doesn't exist + temp_chunks_dir = "temp_chunks" + os.makedirs(temp_chunks_dir, exist_ok=True) + log(f"Created/verified temp directory: {temp_chunks_dir}") + + log(f"S3 source path: {s3_source_path}") + log(f"Local destination: {local_dest_path}") + + # Download chunk from S3 using S3Ferry service + response = s3_ferry_service.transfer_file( + destination_file_path=local_dest_path, + destination_storage_type="FS", + source_file_path=s3_source_path, + source_storage_type="S3" + ) + + log(f"S3Ferry response status: {response.status_code}") + log(f"S3Ferry response body: {response.text}") + + if response.status_code in [200, 201]: + # Read the downloaded chunk file + local_file_path = f"/app/{local_dest_path}" + + if os.path.exists(local_file_path): + log(f"Successfully downloaded chunk to: {local_file_path}") + + # Read and parse the chunk data + with open(local_file_path, 'r', encoding='utf-8') as f: + chunk_data = json.load(f) + + # Clean up the downloaded file + os.remove(local_file_path) + log(f"Cleaned up downloaded file: {local_file_path}") + + # Remove empty directory if it exists + try: + os.rmdir(os.path.dirname(local_file_path)) + except OSError: + pass # Directory not empty or doesn't exist + + return { + "success": True, + "dataset_id": dataset_id, + "page_num": 
page_num, + "chunk_data": chunk_data, + "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}" + } + else: + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": f"Downloaded file not found at: {local_file_path}", + "message": "File download completed but file not accessible" + } + else: + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": f"S3 download failed: HTTP {response.status_code}", + "response_body": response.text, + "message": f"Failed to download chunk {page_num} from S3" + } + + except Exception as e: + log(f"Error during chunk download: {str(e)}") + traceback.print_exc() + return { + "success": False, + "dataset_id": dataset_id, + "page_num": page_num, + "error": str(e), + "message": "Internal error during chunk download" + } + +def main(): + """Main function to handle chunk download process.""" + parser = argparse.ArgumentParser(description='Download a specific chunk from S3') + parser.add_argument('--dataset-id', required=True, help='Dataset ID') + parser.add_argument('--page-num', required=True, type=int, help='Page number (chunk number)') + parser.add_argument('--output-json', help='Output file path for results JSON') + + args = parser.parse_args() + + try: + log(f"Processing chunk download request - Dataset: {args.dataset_id}, Page: {args.page_num}") + + # Download the chunk + result = download_chunk_from_s3(args.dataset_id, args.page_num) + + # Output results + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(result, f, indent=2) + log(f"Results written to {args.output_json}") + else: + # Output ONLY the JSON to stdout (this goes to CronManager) + print(json.dumps(result)) + + log(f"Chunk download completed - Success: {result['success']}") + + # Exit with appropriate code + sys.exit(0 if result['success'] else 1) + + except Exception as e: + log(f"Internal error: {str(e)}") + traceback.print_exc() + + error_result = { + "success": False, + "dataset_id": args.dataset_id, + "page_num": args.page_num, + "error": str(e), + "message": "Script execution failed" + } + + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(error_result, f, indent=2) + else: + print(json.dumps(error_result)) + + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/s3_dataset_processor/fetch_multi_chunk.py b/src/s3_dataset_processor/fetch_multi_chunk.py new file mode 100644 index 00000000..18cf8228 --- /dev/null +++ b/src/s3_dataset_processor/fetch_multi_chunk.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python3 +""" +Python script to download multiple chunks from S3 bucket, aggregate them, and return as JSON. +Used by CronManager endpoint to fetch and combine multiple chunks. 
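+
+Example invocation (illustrative only; the flags match the argparse options
+defined in main() below, and the ID values are placeholders):
+
+    python fetch_multi_chunk.py --dataset-id 12 --chunk-ids "1 2 3" \
+        --output-json /tmp/aggregated_chunks.json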
+""" +import sys +import json +import argparse +import logging +import os +import tempfile +from pathlib import Path +import traceback +from typing import List, Dict, Any + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stderr)] # Log to stderr to keep stdout clean +) +logger = logging.getLogger(__name__) + +# Add the s3_dataset_processor to Python path to import modules +script_dir = Path('/app/src/s3_dataset_processor') +sys.path.insert(0, str(script_dir)) + +def log(message): + """Log to stderr to keep stdout clean for JSON output""" + logger.info(f"📦 [MULTI CHUNK] {message}") + +try: + from services.s3_ferry_service import S3Ferry + s3_ferry_service = S3Ferry() + log("Successfully imported S3FerryService") +except ImportError as e: + log(f"Failed to import S3FerryService: {e}") + sys.exit(1) + +def download_single_chunk_from_s3(dataset_id: str, chunk_id: int) -> Dict[str, Any]: + """ + Download a single chunk from S3 bucket. + + Args: + dataset_id: Dataset ID + chunk_id: Chunk ID/number + + Returns: + Dictionary containing chunk data or error information + """ + try: + log(f"Downloading chunk {chunk_id} from dataset {dataset_id}") + + # Define S3 source path and local destination + chunk_filename = f"{chunk_id}.json" + s3_source_path = f"{dataset_id}/{chunk_filename}" + local_dest_path = f"temp_chunks/{chunk_filename}" + + # Create the temp_chunks directory if it doesn't exist + temp_chunks_dir = "/app/temp_chunks" + os.makedirs(temp_chunks_dir, exist_ok=True) + + log(f"S3 source path: {s3_source_path}") + log(f"Local destination: {local_dest_path}") + + # Download chunk from S3 using S3Ferry service + response = s3_ferry_service.transfer_file( + destination_file_path=local_dest_path, + destination_storage_type="FS", + source_file_path=s3_source_path, + source_storage_type="S3" + ) + + log(f"S3Ferry response status for chunk {chunk_id}: {response.status_code}") + + if response.status_code in [200, 201]: + # Read the downloaded chunk file + local_file_path = f"/app/{local_dest_path}" + + if os.path.exists(local_file_path): + log(f"Successfully downloaded chunk {chunk_id} to: {local_file_path}") + + # Read and parse the chunk data + with open(local_file_path, 'r', encoding='utf-8') as f: + chunk_data = json.load(f) + + # Clean up the downloaded file + os.remove(local_file_path) + log(f"Cleaned up downloaded file: {local_file_path}") + + return { + "success": True, + "chunk_id": chunk_id, + "chunk_data": chunk_data, + "message": f"Successfully downloaded chunk {chunk_id}" + } + else: + return { + "success": False, + "chunk_id": chunk_id, + "error": f"Downloaded file not found at: {local_file_path}", + "message": f"Chunk {chunk_id} download completed but file not accessible" + } + else: + return { + "success": False, + "chunk_id": chunk_id, + "error": f"S3 download failed: HTTP {response.status_code}", + "response_body": response.text, + "message": f"Failed to download chunk {chunk_id} from S3" + } + + except Exception as e: + log(f"Error downloading chunk {chunk_id}: {str(e)}") + traceback.print_exc() + return { + "success": False, + "chunk_id": chunk_id, + "error": str(e), + "message": f"Internal error during chunk {chunk_id} download" + } + +def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, Any]: + """ + Download multiple chunks from S3 and aggregate them. 
+ + Args: + dataset_id: Dataset ID + chunk_ids: List of chunk IDs to download + + Returns: + Dictionary containing aggregated chunk data or error information + """ + try: + log(f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}") + + download_results = [] + successful_chunks = [] + failed_chunks = [] + aggregated_data = [] + total_items = 0 + + # Download each chunk + for chunk_id in chunk_ids: + result = download_single_chunk_from_s3(dataset_id, chunk_id) + download_results.append(result) + + if result["success"]: + successful_chunks.append(chunk_id) + chunk_data = result["chunk_data"] + + # Extract data array from chunk + chunk_items = chunk_data.get("data", []) + aggregated_data.extend(chunk_items) + total_items += len(chunk_items) + + log(f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation") + else: + failed_chunks.append(chunk_id) + log(f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}") + + # Prepare chunk info from the first successful chunk (if any) + chunk_info = {} + if successful_chunks and download_results: + first_successful = next((r for r in download_results if r["success"]), None) + if first_successful: + original_chunk_info = first_successful["chunk_data"].get("chunk_info", {}) + chunk_info = { + "original_dataset": original_chunk_info.get("original_dataset", dataset_id), + "requested_chunks": chunk_ids, + "successful_chunks": successful_chunks, + "failed_chunks": failed_chunks, + "total_chunks_requested": len(chunk_ids), + "successful_downloads": len(successful_chunks), + "failed_downloads": len(failed_chunks), + "total_aggregated_items": total_items, + "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" if successful_chunks else "none" + } + + # Prepare the final aggregated payload + if successful_chunks: + aggregated_payload = { + "success": True, + "dataset_id": dataset_id, + "chunk_info": chunk_info, + "aggregated_data": aggregated_data, + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": len(successful_chunks), + "failed_downloads": len(failed_chunks), + "successful_chunk_ids": successful_chunks, + "failed_chunk_ids": failed_chunks, + "total_items_aggregated": total_items + }, + "download_details": download_results, + "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested chunks" + } + else: + aggregated_payload = { + "success": False, + "dataset_id": dataset_id, + "chunk_info": chunk_info, + "aggregated_data": [], + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": 0, + "failed_downloads": len(failed_chunks), + "successful_chunk_ids": [], + "failed_chunk_ids": failed_chunks, + "total_items_aggregated": 0 + }, + "download_details": download_results, + "error": "All chunk downloads failed", + "message": f"Failed to download any of the {len(chunk_ids)} requested chunks" + } + + log(f"Multi-chunk aggregation completed - Success: {aggregated_payload['success']}") + log(f"Total items aggregated: {total_items}") + + return aggregated_payload + + except Exception as e: + log(f"Error during multi-chunk aggregation: {str(e)}") + traceback.print_exc() + return { + "success": False, + "dataset_id": dataset_id, + "chunk_info": {}, + "aggregated_data": [], + "download_summary": { + "total_requested": len(chunk_ids), + "successful_downloads": 0, + "failed_downloads": len(chunk_ids), + "successful_chunk_ids": [], + "failed_chunk_ids": chunk_ids, + 
"total_items_aggregated": 0 + }, + "error": str(e), + "message": "Internal error during multi-chunk aggregation" + } + +def parse_chunk_ids(chunk_ids_str: str) -> List[int]: + """ + Parse chunk IDs from string format "1 2 3" to list [1, 2, 3]. + + Args: + chunk_ids_str: String containing space-separated chunk IDs + + Returns: + List of integer chunk IDs + """ + try: + # Split by spaces and convert to integers + chunk_ids = [int(chunk_id.strip()) for chunk_id in chunk_ids_str.split() if chunk_id.strip()] + log(f"Parsed chunk IDs: {chunk_ids}") + return chunk_ids + except ValueError as e: + log(f"Error parsing chunk IDs '{chunk_ids_str}': {str(e)}") + raise ValueError(f"Invalid chunk IDs format. Expected space-separated integers, got: '{chunk_ids_str}'") + +def main(): + """Main function to handle multi-chunk download and aggregation process.""" + parser = argparse.ArgumentParser(description='Download and aggregate multiple chunks from S3') + parser.add_argument('--dataset-id', required=True, help='Dataset ID') + parser.add_argument('--chunk-ids', required=True, help='Space-separated chunk IDs (e.g., "1 2 3")') + parser.add_argument('--output-json', help='Output file path for results JSON') + + args = parser.parse_args() + + try: + log(f"Processing multi-chunk request - Dataset: {args.dataset_id}, Chunk IDs: {args.chunk_ids}") + + # Parse chunk IDs + chunk_ids = parse_chunk_ids(args.chunk_ids) + + if not chunk_ids: + raise ValueError("No valid chunk IDs provided") + + # Download and aggregate chunks + result = download_multiple_chunks(args.dataset_id, chunk_ids) + + # Output results + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(result, f, indent=2) + log(f"Results written to {args.output_json}") + else: + # Output ONLY the JSON to stdout (this goes to CronManager) + print(json.dumps(result)) + + log(f"Multi-chunk processing completed - Success: {result['success']}") + + # Exit with appropriate code + sys.exit(0 if result['success'] else 1) + + except Exception as e: + log(f"Internal error: {str(e)}") + traceback.print_exc() + + error_result = { + "success": False, + "dataset_id": args.dataset_id, + "chunk_ids": args.chunk_ids, + "error": str(e), + "message": "Script execution failed" + } + + if args.output_json: + with open(args.output_json, 'w') as f: + json.dump(error_result, f, indent=2) + else: + print(json.dumps(error_result)) + + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/s3_dataset_processor/models/schemas.py b/src/s3_dataset_processor/models/schemas.py index c892ce17..c31722ce 100644 --- a/src/s3_dataset_processor/models/schemas.py +++ b/src/s3_dataset_processor/models/schemas.py @@ -1,7 +1,7 @@ """Pydantic models for API requests and responses.""" from pydantic import BaseModel -from typing import List, Dict, Optional +from typing import List, Dict, Optional, Any class DownloadRequest(BaseModel): @@ -15,6 +15,7 @@ class DownloadedFile(BaseModel): """Model for downloaded file information.""" agency_id: str + agency_name: str original_filename: str local_path: str file_size: int @@ -33,3 +34,36 @@ class DownloadResponse(BaseModel): downloaded_files: List[DownloadedFile] extracted_folders: List[Dict[str, str]] total_extracted_folders: int + +class ChunkDownloadRequest(BaseModel): + """Request model for downloading a single chunk.""" + dataset_id: str + page_num: int + + +class MultiChunkDownloadRequest(BaseModel): + """Request model for downloading multiple chunks.""" + dataset_id: str + chunk_ids: 
List[int] + + +class ChunkDownloadResponse(BaseModel): + """Response model for single chunk download.""" + success: bool + dataset_id: str + page_num: Optional[int] = None + chunk_data: Optional[Dict[str, Any]] = None + error: Optional[str] = None + message: str + + +class MultiChunkDownloadResponse(BaseModel): + """Response model for multi-chunk download and aggregation.""" + success: bool + dataset_id: str + chunk_info: Optional[Dict[str, Any]] = None + aggregated_data: List[Dict[str, Any]] + download_summary: Dict[str, Any] + download_details: Optional[List[Dict[str, Any]]] = None + error: Optional[str] = None + message: str diff --git a/src/s3_dataset_processor/s3_processor_api.py b/src/s3_dataset_processor/s3_processor_api.py deleted file mode 100644 index 6421eaa6..00000000 --- a/src/s3_dataset_processor/s3_processor_api.py +++ /dev/null @@ -1,269 +0,0 @@ -"""FastAPI application with refactored structure.""" - -from fastapi import FastAPI, HTTPException, BackgroundTasks -import uvicorn -import re -import json -import requests -from loguru import logger -import sys - -from config.settings import settings -from models.schemas import DownloadRequest, DownloadResponse -from services.url_decoder_service import URLDecoderService -from services.download_service import DownloadService -from services.extraction_service import ExtractionService -from handlers.response_handler import ResponseHandler - -logger.remove() -# Add stdout handler with your preferred format -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") - -# Initialize FastAPI app -app = FastAPI( - title=settings.API_TITLE, - description=settings.API_DESCRIPTION, - version=settings.API_VERSION, -) - -# Initialize services -url_decoder_service = URLDecoderService() -download_service = DownloadService() -extraction_service = ExtractionService() -response_handler = ResponseHandler() - - -def process_callback_background(file_path: str, encoded_results: str): - """ - Background function to process the dataset generation callback. - This runs asynchronously after returning 200 to the caller. 
- """ - try: - print(f"[CALLBACK] Starting background processing for: {file_path}") - - # Extract dataset ID from file path (e.g., output_datasets/single_question/12.json -> 12) - dataset_id_match = re.search(r"/([^/]+)\.json$", file_path) - dataset_id = dataset_id_match.group(1) if dataset_id_match else "unknown" - - logger.info(f"[CALLBACK] Extracted dataset ID: {dataset_id}") - - # Decode the results using the existing service - decoded_results = url_decoder_service.decode_signed_urls(encoded_results) - - logger.info(f"[CALLBACK] Decoded {len(decoded_results)} results") - - # Process the decoded results to create the required payload - agencies = [] - overall_success = True - - for i, result in enumerate(decoded_results): - # Extract agency_id from dataset_metadata - dataset_metadata = result.get("dataset_metadata", {}) - agency_id = dataset_metadata.get("agency_id", "unknown") - success = result.get("success", False) - - sync_status = "Synced_with_CKB" if success else "Sync_with_CKB_Failed" - - agencies.append({"agencyId": agency_id, "syncStatus": sync_status}) - - logger.info( - f"[CALLBACK] Agency {i + 1}: ID={agency_id}, Success={success}, Status={sync_status}" - ) - - if not success: - overall_success = False - - generation_status = ( - "Generation_Success" if overall_success else "Generation_Failed" - ) - - # Create the exact payload format requested - callback_payload = { - "agencies": agencies, - "datasetId": dataset_id, - "generationStatus": generation_status, - } - - # Log the processed callback - logger.info(f"[CALLBACK] {json.dumps(callback_payload, indent=2)}") - logger.info(f"[CALLBACK] Dataset ID: {dataset_id}") - logger.info(f"[CALLBACK] Generation Status: {generation_status}") - logger.info(f"[CALLBACK] Total Agencies: {len(agencies)}") - logger.info( - f"[CALLBACK] Successful Agencies: {len([a for a in agencies if a['syncStatus'] == 'Synced_with_CKB'])}" - ) - logger.info( - f"[CALLBACK] Failed Agencies: {len([a for a in agencies if a['syncStatus'] == 'Sync_with_CKB_Failed'])}" - ) - - STATUS_UPDATE_URL = ( - "http://ruuter-public:8086/global-classifier/agencies/data/generation" - ) - - logger.info(f"[CALLBACK] Sending callback payload to: {STATUS_UPDATE_URL}") - - try: - # Send POST request to the status update endpoint - response = requests.post( - STATUS_UPDATE_URL, - json=callback_payload, - headers={"Content-Type": "application/json"}, - timeout=30, - ) - - logger.info( - f"[CALLBACK] Status update response - HTTP Status: {response.status_code}" - ) - logger.info(f"[CALLBACK] Status update response body: {response.text}") - - if response.status_code == 200: - logger.info( - "[CALLBACK] ✅ Successfully sent callback payload to status update endpoint" - ) - else: - logger.warning( - f"[CALLBACK] ⚠️ Status update endpoint returned non-200 status: {response.status_code}" - ) - logger.info(f"[CALLBACK] Response: {response.text}") - - except requests.exceptions.RequestException as webhook_error: - logger.error( - f"[CALLBACK] ❌ Error sending callback to status update endpoint: {str(webhook_error)}" - ) - logger.debug(f"[CALLBACK] URL: {STATUS_UPDATE_URL}") - logger.debug( - f"[CALLBACK] Payload: {json.dumps(callback_payload, indent=2)}" - ) - - except Exception as unexpected_error: - logger.error( - f"[CALLBACK] ❌ Unexpected error during status update: {str(unexpected_error)}" - ) - - except Exception as e: - logger.error(f"[CALLBACK] Error in background processing: {str(e)}") - logger.debug(f"[CALLBACK] File path: {file_path}") - logger.debug( - f"[CALLBACK] Encoded 
results length: {len(encoded_results) if encoded_results else 0}" - ) - # You might want to implement retry logic or error handling here - - -@app.get("/") -async def root(): - """Root endpoint with API information.""" - return { - "service": settings.API_TITLE, - "version": settings.API_VERSION, - "description": settings.API_DESCRIPTION, - "endpoints": { - "health": "/health", - "decode": "/decode-urls (POST)", - "download": "/download-datasets (POST)", - "docs": "/docs", - }, - } - - -@app.get("/health") -async def health_check(): - """Health check endpoint.""" - return { - "status": "healthy", - "service": "s3-dataset-processor", - "version": settings.API_VERSION, - "data_dir": settings.DATA_DIR, - } - - -@app.post("/download-datasets", response_model=DownloadResponse) -async def download_datasets(request: DownloadRequest): - """ - Download dataset files from signed URLs and extract if needed. - - Args: - request: Request containing encoded data and download options - - Returns: - Download results with file locations - """ - try: - if not request.encoded_data or not isinstance(request.encoded_data, str): - raise HTTPException( - status_code=400, detail="'encoded_data' must be a non-empty string" - ) - - # Decode the data using service - decoded_data = url_decoder_service.decode_signed_urls(request.encoded_data) - logger.info(f"Starting download for {len(decoded_data)} files") - - # Process downloads using service - downloaded_files, successful_downloads, failed_downloads = ( - download_service.process_downloads(decoded_data) - ) - - # Process extractions using service - extracted_folders = extraction_service.process_extractions( - downloaded_files, request.extract_files - ) - - # Format response using handler - response = response_handler.format_download_response( - decoded_data, - downloaded_files, - successful_downloads, - failed_downloads, - extracted_folders, - ) - - return response - - except ValueError as e: - raise HTTPException(status_code=400, detail=f"Decoding error: {str(e)}") - except Exception as e: - raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}") - - -@app.post("/process-generation-callback") -async def process_generation_callback(request: dict, background_tasks: BackgroundTasks): - """ - Process dataset generation callback in the background. - Returns 200 immediately and processes the callback asynchronously. 
- - Args: - request: Dictionary containing 'file_path' and 'results' (encoded) - background_tasks: FastAPI background tasks handler - - Returns: - Immediate 200 response, actual processing happens in background - """ - try: - file_path = request.get("file_path", "") - encoded_results = request.get("results", "") - - if not encoded_results or not isinstance(encoded_results, str): - raise HTTPException( - status_code=400, detail="'results' must be a non-empty string" - ) - - if not file_path: - raise HTTPException(status_code=400, detail="'file_path' is required") - - # Add the background task - background_tasks.add_task( - process_callback_background, file_path, encoded_results - ) - - # Return immediate response - return { - "message": "Callback processing started", - "status": "accepted", - "file_path": file_path, - } - - except Exception as e: - raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}") - - -if __name__ == "__main__": - uvicorn.run(app, host="0.0.0.0", port=8001) diff --git a/src/s3_dataset_processor/services/download_service.py b/src/s3_dataset_processor/services/download_service.py index ea56bf71..1ca27964 100644 --- a/src/s3_dataset_processor/services/download_service.py +++ b/src/s3_dataset_processor/services/download_service.py @@ -4,15 +4,18 @@ import requests from typing import List, Dict, Any from urllib.parse import urlparse - from config.settings import settings from models.schemas import DownloadedFile -from loguru import logger import sys - -logger.remove() -# Add stdout handler with your preferred format -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") +import logging +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(sys.stdout)] +) +logger = logging.getLogger(__name__) class DownloadService: @@ -68,6 +71,7 @@ def process_downloads(self, decoded_data: List[Dict[str, Any]]) -> tuple: try: for entry in decoded_data: agency_id = entry.get("agencyId", "unknown") + agency_name = entry.get("agencyName", "Unknown Agency") signed_url = entry.get("dataUrl", "") if not signed_url: @@ -84,13 +88,14 @@ def process_downloads(self, decoded_data: List[Dict[str, Any]]) -> tuple: # Download file to data directory local_file_path = os.path.join(self.data_dir, original_filename) - logger.info(f"Downloading {original_filename} for agency {agency_id}") + logger.info(f"Downloading {original_filename} for agency {agency_id} with name {agency_name}") if self.download_file(signed_url, local_file_path): file_size = os.path.getsize(local_file_path) downloaded_file = DownloadedFile( agency_id=agency_id, + agency_name=agency_name, original_filename=original_filename, local_path=local_file_path, file_size=file_size, diff --git a/src/s3_dataset_processor/services/extraction_service.py b/src/s3_dataset_processor/services/extraction_service.py index a461fe8e..a81318de 100644 --- a/src/s3_dataset_processor/services/extraction_service.py +++ b/src/s3_dataset_processor/services/extraction_service.py @@ -7,11 +7,16 @@ from config.settings import settings from models.schemas import DownloadedFile -from loguru import logger import sys - -logger.remove() -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") +import logging +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + 
handlers=[logging.StreamHandler(sys.stdout)] +) +logger = logging.getLogger(__name__) class ExtractionService: @@ -63,7 +68,8 @@ def process_extractions( continue # Extract agency name from filename - agency_name = downloaded_file.original_filename.replace(".zip", "") + # agency_name = downloaded_file.original_filename.replace(".zip", "") + agency_name = downloaded_file.agency_name agency_dir = os.path.join(self.data_dir, agency_name) os.makedirs(agency_dir, exist_ok=True) @@ -85,7 +91,7 @@ def process_extractions( # Add to extracted folders list extracted_folders.append( - {"agency_id": downloaded_file.agency_id, "folder_path": agency_dir} + {"agency_id": downloaded_file.agency_id, "agency_name": downloaded_file.agency_name, "folder_path": agency_dir} ) # Remove the ZIP file after successful extraction diff --git a/src/s3_dataset_processor/services/s3_ferry_service.py b/src/s3_dataset_processor/services/s3_ferry_service.py new file mode 100644 index 00000000..a7e3d2fc --- /dev/null +++ b/src/s3_dataset_processor/services/s3_ferry_service.py @@ -0,0 +1,147 @@ +"""Service for S3Ferry file transfer operations.""" + +import requests +import logging +import traceback +from typing import Dict, Any + +# Configure logging +logger = logging.getLogger(__name__) + +class S3Ferry: + """Service class for handling S3Ferry file transfer operations.""" + + def __init__(self, base_url: str = "http://gc-s3-ferry:3000"): + """ + Initialize the S3Ferry service. + + Args: + base_url: Base URL for the S3Ferry service + """ + self.base_url = base_url + self.url = f"{base_url}/v1/files/copy" + logger.info(f"S3Ferry service initialized with URL: {self.url}") + + def transfer_file(self, destination_file_path: str, destination_storage_type: str, + source_file_path: str, source_storage_type: str) -> requests.Response: + """ + Transfer a file using S3Ferry service. + + Args: + destination_file_path: Path where the file should be stored in destination + destination_storage_type: Type of destination storage (e.g., 's3', 'local') + source_file_path: Path of the source file + source_storage_type: Type of source storage (e.g., 'local', 's3') + + Returns: + Response object from the S3Ferry service + """ + try: + payload = self.get_s3_ferry_payload( + destination_file_path, + destination_storage_type, + source_file_path, + source_storage_type + ) + + logger.info(f"[S3_FERRY] Transferring file: {source_file_path} -> {destination_file_path}") + logger.debug(f"[S3_FERRY] Payload: {payload}") + + response = requests.post( + self.url, + json=payload, + headers={"Content-Type": "application/json"}, + timeout=60 + ) + + logger.info(f"[S3_FERRY] Transfer response status: {response.status_code}") + + # Accept both 200 (OK) and 201 (Created) as success + if response.status_code not in [200, 201]: + logger.error(f"[S3_FERRY] Transfer failed: {response.text}") + else: + logger.info(f"[S3_FERRY] ✅ Transfer successful (HTTP {response.status_code})") + + return response + + except Exception as e: + logger.error(f"[S3_FERRY] Error during file transfer: {str(e)}") + traceback.print_exc() + raise + + def get_s3_ferry_payload(self, destination_file_path: str, destination_storage_type: str, + source_file_path: str, source_storage_type: str) -> Dict[str, str]: + """ + Generate S3Ferry payload for file transfer. 
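+
+        The keys mirror the camelCase fields of the S3Ferry /v1/files/copy
+        request body; an illustrative payload (paths are placeholders):
+
+            {"destinationFilePath": "temp_chunks/1.json",
+             "destinationStorageType": "FS",
+             "sourceFilePath": "12/1.json",
+             "sourceStorageType": "S3"}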
+ + Args: + destination_file_path: Path where the file should be stored in destination + destination_storage_type: Type of destination storage + source_file_path: Path of the source file + source_storage_type: Type of source storage + + Returns: + Dictionary containing the S3Ferry payload + """ + payload = { + "destinationFilePath": destination_file_path, + "destinationStorageType": destination_storage_type, + "sourceFilePath": source_file_path, + "sourceStorageType": source_storage_type + } + + return payload + + def upload_to_s3(self, local_file_path: str, s3_destination_path: str) -> requests.Response: + """ + Convenience method to upload a local file to S3. + + Args: + local_file_path: Path to the local file + s3_destination_path: S3 destination path (e.g., 'bucket/folder/file.json') + + Returns: + Response object from the S3Ferry service + """ + return self.transfer_file( + destination_file_path=s3_destination_path, + destination_storage_type="S3", + source_file_path=local_file_path, + source_storage_type="FS" + ) + + def download_from_s3(self, s3_source_path: str, local_destination_path: str) -> requests.Response: + """ + Convenience method to download a file from S3 to local storage. + + Args: + s3_source_path: S3 source path (e.g., 'bucket/folder/file.json') + local_destination_path: Local destination path + + Returns: + Response object from the S3Ferry service + """ + return self.transfer_file( + destination_file_path=local_destination_path, + destination_storage_type="local", + source_file_path=s3_source_path, + source_storage_type="s3" + ) + + # def copy_s3_to_s3(self, source_s3_path: str, destination_s3_path: str) -> requests.Response: + # """ + # Convenience method to copy files between S3 locations. + + # Args: + # source_s3_path: Source S3 path + # destination_s3_path: Destination S3 path + + # Returns: + # Response object from the S3Ferry service + # """ + # return self.transfer_file( + # destination_file_path=destination_s3_path, + # destination_storage_type="s3", + # source_file_path=source_s3_path, + # source_storage_type="s3" + # ) \ No newline at end of file From c7b65060d55738fc853215825acf90b57cb0db9a Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 26 Jun 2025 01:30:20 +0530 Subject: [PATCH 055/195] feat: add new Handlebars templates and update dataset handling in services --- .../hbs/get_filtered_chunks.handlebars | 1 + .../hbs/get_single_chunk_data.handlebars | 1 + DSL/DMapper/global-classifier/lib/helpers.js | 151 +++++++++++++++--- ...ta.sql => insert-data-chunk-metadata .sql} | 0 .../GET/datasets/overview.yml | 79 ++++++++- GUI/src/pages/ViewDataset/index.tsx | 66 +++++--- GUI/src/services/datasets.ts | 2 +- 7 files changed, 259 insertions(+), 41 deletions(-) create mode 100644 DSL/DMapper/global-classifier/hbs/get_filtered_chunks.handlebars create mode 100644 DSL/DMapper/global-classifier/hbs/get_single_chunk_data.handlebars rename DSL/Resql/global-classifier/POST/{insert-metadata.sql => insert-data-chunk-metadata .sql} (100%) diff --git a/DSL/DMapper/global-classifier/hbs/get_filtered_chunks.handlebars b/DSL/DMapper/global-classifier/hbs/get_filtered_chunks.handlebars new file mode 100644 index 00000000..c828c673 --- /dev/null +++ b/DSL/DMapper/global-classifier/hbs/get_filtered_chunks.handlebars @@ -0,0 +1 @@ +{{{filterDataByAgency aggregatedData startIndex agencyId}}} \ No newline at end of file diff --git a/DSL/DMapper/global-classifier/hbs/get_single_chunk_data.handlebars b/DSL/DMapper/global-classifier/hbs/get_single_chunk_data.handlebars new file 
mode 100644 index 00000000..77a6344d --- /dev/null +++ b/DSL/DMapper/global-classifier/hbs/get_single_chunk_data.handlebars @@ -0,0 +1 @@ +{{{getSingleChunkData dataChunk}}} \ No newline at end of file diff --git a/DSL/DMapper/global-classifier/lib/helpers.js b/DSL/DMapper/global-classifier/lib/helpers.js index d55169d5..5242b572 100644 --- a/DSL/DMapper/global-classifier/lib/helpers.js +++ b/DSL/DMapper/global-classifier/lib/helpers.js @@ -202,17 +202,42 @@ export function extractNewAgencies(gcAgencies, centopsAgencies) { * @param {string|number} pageNum * @returns {Object} Parsed JSON content of the file */ -export function getAllChunksFromS3(datasetId, pageNum) { - - return JSON.stringify({ - data: [ - { id: 1, question: "How do I renew my passport?", clientName: "Tax Department", clientId: "12" }, - { id: 2, question: "What are the tax filing deadlines?", clientName: "Tax Department", clientId: "12" }, - { id: 3, question: "How can I apply for unemployment benefits?", clientName: "Tax Department", clientId: "12" }, - { id: 4, question: "Where can I get my birth certificate?", clientName: "Tax Department", clientId: "12" }, - { id: 5, question: "How do I register a new business?", clientName: "Tax Department", clientId: "12" }, - ] - }); +export function getSingleChunkData(dataChunk) { + const data = [ + { + "agency_id": "1", + "id": "1", + "question": "Kas kasutajad saavad sarnaselt e-teenuse teiste liikemediakanid sisselogumaast vältimaks, millele lisaks tuleb e-teenuse autentimine?" + }, + { + "agency_id": "2", + "id": "2", + "question": "Kõige järgneisel näitus on, kuidas autentimine koos seansihaldusega toimib e-teenustes?" + }, + { + "agency_id": "1", + "id": "3", + "question": "Mida kasutajad peavad teada, et autentimisteenuse kasutajaga seotud e-teenuse saamine?" + }, + { + "agency_id": "1", + "id": "4", + "question": "Kõige algune asi on, et kui sissenootad esimena Riigi autentimisteenusega, saab midagi tegi?" + }, + { + "agency_id": "3", + "id": "5", + "question": "Kui soovid siseneda mõnda riiklikku e-teenusesse, pead end esmalt autentima ehk tõendama, et oled see, kes väiduta end olevat?" 
+  }
+  ];
+  const mapped = data.map(item => ({
+    clientId: item.agency_id,
+    id: item.id,
+    clientName: item.agency_id,
+    question: item.question
+  }));
+
+  return JSON.stringify(mapped);
 }
 
 export function getPaginatedChunkIds(chunks, agencyId, pageNum, pageSize = 5) {
@@ -223,9 +248,7 @@ export function getPaginatedChunkIds(chunks, agencyId, pageNum, pageSize = 5) {
   let foundPage = false;
 
   for (const chunk of chunks) {
-    // Robustly parse included_agencies
-    let agencies=JSON.parse(chunk.includedAgencies.value)
-
+    let agencies = JSON.parse(chunk.includedAgencies.value)
     const count = agencies.filter(a => String(a) === String(agencyId)).length;
     if (count === 0) continue;
@@ -245,7 +268,6 @@
     resultChunks.push(chunk.chunkId || chunk.chunkId);
     collected += count;
 
-    // If we've collected enough, stop
     if (collected >= pageSize) break;
 
     agencyRecordIndex += count;
@@ -253,9 +275,102 @@
   return JSON.stringify(
     {
-      chunks: resultChunks,
-      startIndex: startIndex
-    }
+      chunks: resultChunks?.join(' '),
+      startIndex: startIndex
+    }
   );
 }
 
+export function filterDataByAgency(aggregatedData, startIndex, agencyId, pageSize = 5) {
+  // Placeholder records, kept under their own name rather than shadowing the
+  // `aggregatedData` parameter; used only when no payload is supplied.
+  const mockAggregatedData = [
+    {
+      "agency_id": "1",
+      "id": "1",
+      "question": "Kas kasutajad saavad sarnaselt e-teenuse teiste liikemediakanid sisselogumaast vältimaks, millele lisaks tuleb e-teenuse autentimine?"
+    },
+    {
+      "agency_id": "2",
+      "id": "2",
+      "question": "Kõige järgneisel näitus on, kuidas autentimine koos seansihaldusega toimib e-teenustes?"
+    },
+    {
+      "agency_id": "1",
+      "id": "3",
+      "question": "Mida kasutajad peavad teada, et autentimisteenuse kasutajaga seotud e-teenuse saamine?"
+    },
+    {
+      "agency_id": "1",
+      "id": "4",
+      "question": "Kõige algune asi on, et kui sissenootad esimena Riigi autentimisteenusega, saab midagi tegi?"
+    },
+    {
+      "agency_id": "3",
+      "id": "5",
+      "question": "Kui soovid siseneda mõnda riiklikku e-teenusesse, pead end esmalt autentima ehk tõendama, et oled see, kes väiduta end olevat?"
+    },
+    {
+      "agency_id": "1",
+      "id": "6",
+      "question": "Mida autentsimine on ja kuidas kasutajad siseüleskirjastavad?"
+    },
+    {
+      "agency_id": "2",
+      "id": "7",
+      "question": "Mis on 'sisene iseteenindusse' ja kuidas see toimib Riigi autentimisteenusesse juures?"
+    },
+    {
+      "agency_id": "3",
+      "id": "8",
+      "question": "Kõige paremas kohas autentimisteenuses, millel on MOBILID, sisestaab kasutajatele võõrüluseid e-teenuseid ilma lõpetamata, et kõiki ssiselugejad on vältunud uue silsitud."
+    },
+    {
+      "agency_id": "3",
+      "id": "9",
+      "question": "Kõige algune nõu on, et sisestada riiklikud e-teenustes toiminev sisse. Kui kasutaja e-teenuste leidul vahetab sisse Mobiil-ID, saab näiteks sarnaselt kasutada ühekordset sisselogamist."
+    },
+    {
+      "agency_id": "2",
+      "id": "10",
+      "question": "Mida teema on esimiselt ei rahuliseeritavas DigiDoc4 rakendusesõitu, et sertifikaatide usaldusnimekirja uuendamine ei ole edukane?"
+    },
+    {
+      "agency_id": "2",
+      "id": "11",
+      "question": "Mida teh kedagi, kui DigiDoc4 rakenduse veateade ütib, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus?"
+    },
+    {
+      "agency_id": "3",
+      "id": "12",
+      "question": "Mida teh tehdä, kun DigiDoc4 rakenduse veateade ütib, että sertifikaatide usaldusnimekirja on uudistettu epäonnistuneesti?"
+    },
+    {
+      "agency_id": "2",
+      "id": "13",
+      "question": "Eks, kui DigiDoc4 rakenduse veateade ütib, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus, mida teha?"
+    },
+    {
+      "agency_id": "3",
+      "id": "14",
+      "question": "Mida teema on selle veateadega seotud ja mida peale seda teada?"
+    },
+    {
+      "agency_id": "1",
+      "id": "15",
+      "question": "Mida teh kedagi, kui DigiDoc4 rakenduse veateade ütib, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus?"
+    }
+  ];
+
+  // Prefer the caller-supplied data; fall back to the stub when none is given.
+  const source = (aggregatedData && aggregatedData.length) ? aggregatedData : mockAggregatedData;
+
+  const filtered = source.filter(item => String(item.agency_id) === String(agencyId));
+
+  const paginated = filtered.slice(startIndex, startIndex + pageSize);
+
+  const result = paginated.map(item => ({
+    clientId: item.agency_id,
+    id: item.id,
+    clientName: item.agency_id, // No mapping available, so use agency_id
+    question: item.question
+  }));
+  console.log("Filtered and paginated data:", result);
+
+  // A Handlebars helper must return a string, otherwise the template renders nothing.
+  return JSON.stringify(result);
+}
+
diff --git a/DSL/Resql/global-classifier/POST/insert-metadata.sql b/DSL/Resql/global-classifier/POST/insert-data-chunk-metadata .sql
similarity index 100%
rename from DSL/Resql/global-classifier/POST/insert-metadata.sql
rename to DSL/Resql/global-classifier/POST/insert-data-chunk-metadata .sql
diff --git a/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml b/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml
index 23e2c1c4..f4d2528e 100644
--- a/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml
+++ b/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml
@@ -62,14 +62,87 @@ getPaginatedChunkIds:
       agencyId: ${agencyId}
       pageNum: ${pageNum}
   result: paginatedChunksResult
-  next: return_paginated_result
+  next: download_agency_chunks_data
+
+download_single_chunk_data:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_CRON_MANAGER]/execute/download_chunk_data"
+    query:
+      datasetId: ${datasetId}
+      pageNum: ${pageNum}
+  result: singleChunkResult
+  next: processSingleChunkResult
+
+processSingleChunkResult:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_DMAPPER]/hbs/global-classifier/get_single_chunk_data"
+    headers:
+      type: json
+    body:
+      chunkData: ${singleChunkResult.response}
+  result: chunkDataResult
+  next: return_single_chunk_ok
+
+download_agency_chunks_data:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_CRON_MANAGER]/execute/download_agency_chunk_data"
+    query:
+      datasetId: ${datasetId}
+      chunkIds: ${paginatedChunksResult.response.body.chunks}
+      # pageNum: ${pageNum}
+      # agencyId: ${agencyId}
+      # startIndex: ${paginatedChunksResult.response.body.startIndex}
+  result: aggregatedDataResult
+  next: filterChunkDataByAgency
+
+filterChunkDataByAgency:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_DMAPPER]/hbs/global-classifier/get_filtered_chunks"
+    headers:
+      type: json
+    body:
+      aggregatedData: ${aggregatedDataResult.response}
+      startIndex: ${paginatedChunksResult.response.body.startIndex}
+      agencyId: ${agencyId}
+  result: filteredChunkDataResult
+  next: return_filtered_chunk_ok
 
 return_chunk_data:
   return: ${dataChunksResult.response.body}
   status: 200
   next: end
 
-return_paginated_result:
-  return: ${paginatedChunksResult.response.body}
+# return_paginated_result:
+#   return: ${paginatedChunksResult.response.body}
+#   status: 200
+#   next: end
+
+return_single_chunk_ok:
+  assign:
+    dataResponse: {
+      operationSuccess: true,
+      data: ${chunkDataResult.response.body}
+    }
+  next: return_single_chunk_response
+
+return_single_chunk_response:
+  return: ${dataResponse}
+  status: 200
+  next: end
+
+return_filtered_chunk_ok:
+  assign:
+    dataResponseFiltered: {
+      operationSuccess: true,
+      data: ${filteredChunkDataResult.response.body}
+    }
+  next: return_filtered_chunk_response
+
+return_filtered_chunk_response:
+  return: ${dataResponseFiltered}
   status: 200
   next: end
\ No newline at end of file
diff --git a/GUI/src/pages/ViewDataset/index.tsx b/GUI/src/pages/ViewDataset/index.tsx
index c73cd009..c526b142 100644
--- a/GUI/src/pages/ViewDataset/index.tsx
+++ b/GUI/src/pages/ViewDataset/index.tsx
@@ -1,7 +1,7 @@
 import BackArrowButton from 'assets/BackArrowButton';
 import { Button, Card, DataTable, Dialog, Icon, Label, Switch } from 'components';
 import { ButtonAppearanceTypes, LabelType } from 'enums/commonEnums';
-import React, { useMemo, useState } from 'react';
+import React, { useEffect, useMemo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Link, useSearchParams } from 'react-router-dom';
 import { generateDynamicColumns } from 'utils/dataTableUtils';
@@ -16,7 +16,6 @@
 import DynamicForm from 'components/FormElements/DynamicForm';
 import { datasetQueryKeys, integratedAgenciesQueryKeys } from 'utils/queryKeys';
 import { getDatasetData, getDatasetMetadata } from 'services/datasets';
 import { useQuery } from '@tanstack/react-query';
-import { set } from 'date-fns';
 import { useDialog } from 'hooks/useDialog';
 import { fetchAllAgencies } from 'services/agencies';
@@ -36,6 +35,8 @@ const ViewDataset = () => {
   const datasetId = searchParams.get('datasetId');
   const [selectedRow, setSelectedRow] = useState();
   const [editedRows, setEditedRows] = useState([]);
+  const [selectedAgencyId, setSelectedAgencyId] = useState("all");
+
   const { data: metadata, isLoading } = useQuery({
     queryKey: datasetQueryKeys.GET_META_DATA(datasetId ?? 0),
@@ -43,11 +44,17 @@
   const { data: dataset, isLoading: datasetIsLoading } = useQuery({
-    queryKey: datasetQueryKeys.GET_DATA_SETS(datasetId ?? 0, 'all', 1),
-    queryFn: () => getDatasetData(datasetId ?? 0, 'all', 1),
+    queryKey: datasetQueryKeys.GET_DATA_SETS(datasetId ?? 0, selectedAgencyId, pagination.pageIndex + 1),
+    queryFn: () => getDatasetData(datasetId ?? 0, selectedAgencyId, pagination.pageIndex + 1),
   });
   const [updatedDataset, setUpdatedDataset] = useState(dataset);
+
+  useEffect(() => {
+    if (dataset) {
+      setUpdatedDataset(dataset);
+    }
+  }, [dataset]);
+
   const { data: agencies } = useQuery({
     queryKey: integratedAgenciesQueryKeys.ALL_AGENCIES_LIST(),
     queryFn: () => fetchAllAgencies(),
@@ -101,14 +108,11 @@
   );
 
-  const dataColumns = useMemo(
-    () => generateDynamicColumns(datasets?.fields ?? [], editView, deleteView),
-    [datasets?.fields]
-  );
+  const dataColumns = generateDynamicColumns(["id", "question", "clientName"], editView, deleteView);
 
   const editDataRecord = (dataRow: SelectedRowPayload) => {
-    const originalRow = datasets?.dataPayload?.find(
-      (row) => row.id === dataRow.id
+    const originalRow = dataset?.find(
+      (row: any) => row.id === dataRow.id
     );
 
     // Only proceed if question or clientId has changed
@@ -135,11 +139,13 @@
       ?
{ id: dataRow.id, question: (dataRow as any).question, + clientId: (dataRow as any).clientId, clientName: (dataRow as any).clientName, + } : row ); - setUpdatedDataset(payload as { id: number; question: string; clientName: string; clientId: string }[]); + setUpdatedDataset(payload as { id: number; question: string; clientId: string; clientName: string; }[]); }; const deleteDataRecord = (dataRow: SelectedRowPayload) => { @@ -150,13 +156,30 @@ const ViewDataset = () => { }; const minorUpdate = () => { + const questionUpdated: SelectedRowPayload[] = []; + const clientUpdated: SelectedRowPayload[] = []; + + editedRows.forEach((row) => { + const original = dataset?.find((r: any) => r.id === row.id); + if (!original) return; + const isQuestionChanged = original.question !== row.question; + const isClientChanged = original.clientId !== row.clientId; + + if (isQuestionChanged && !isClientChanged) { + questionUpdated.push(row); + } + if (isClientChanged) { + clientUpdated.push(row); + } + }); + const payload = { - datasetId: datasetId, - updatedRows: editedRows, + questionUpdated, + clientUpdated, deletedRows: deletedRowIds, - } + }; console.log(payload, 'minorUpdatePayload'); - } + }; return (
    @@ -183,12 +206,12 @@ const ViewDataset = () => { {t('datasets.detailedView.connectedModels') ?? ''} : N/A

    - {t('datasets.detailedView.noOfItems') ?? ''} : {datasets?.dataPayload?.length ?? 0} + {t('datasets.detailedView.noOfItems') ?? ''} : {dataset?.length ?? 0}

    - -
    + {/* +
    */} @@ -217,6 +240,11 @@ const ViewDataset = () => { ]} onSelect={(value) => { console.log('Selected option:', value); + setSelectedAgencyId(value); + setPagination({ + pageIndex: 0, + pageSize: 5, + }) }} setPagination={(state: PaginationState) => { if ( @@ -226,7 +254,7 @@ const ViewDataset = () => { return; setPagination(state); }} - pagesCount={1} + pagesCount={10} isClientSide={false} /> )} diff --git a/GUI/src/services/datasets.ts b/GUI/src/services/datasets.ts index 6048c56e..8067a4f6 100644 --- a/GUI/src/services/datasets.ts +++ b/GUI/src/services/datasets.ts @@ -39,5 +39,5 @@ export async function getDatasetData( pageNum : pageNum ?? 1, }, }); - return data?.response?.data ?? []; + return data?.response ?? []; } \ No newline at end of file From cd869623d8e2a1a0a4561c4df344cf719b85fb50 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 26 Jun 2025 01:31:32 +0530 Subject: [PATCH 056/195] fix: update flow to download single chunk data when agencyId is "all" --- DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml b/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml index f4d2528e..01648821 100644 --- a/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml +++ b/DSL/Ruuter.private/global-classifier/GET/datasets/overview.yml @@ -27,7 +27,7 @@ extractRequestData: checkFilter: switch: - condition: ${agencyId === "all"} - next: getAllChunks + next: download_single_chunk_data next: getChunksAndAgencies getAllChunks: From c9b8cfabfd78a91ea413cf6754369581c6cb3fc8 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Mon, 30 Jun 2025 17:54:32 +0530 Subject: [PATCH 057/195] feat: enhance DataTable and ViewDataset components with dropdown filters and no data view --- GUI/src/components/DataTable/index.tsx | 9 +++++---- .../molecules/DataModelCard/index.tsx | 4 ++-- GUI/src/pages/ViewDataset/index.tsx | 18 ++++++++++++------ 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/GUI/src/components/DataTable/index.tsx b/GUI/src/components/DataTable/index.tsx index 58317410..5673a6ad 100644 --- a/GUI/src/components/DataTable/index.tsx +++ b/GUI/src/components/DataTable/index.tsx @@ -27,7 +27,6 @@ import { MdOutlineWest, } from 'react-icons/md'; import clsx from 'clsx'; -import { Link } from 'react-router-dom'; import { useTranslation } from 'react-i18next'; import { Icon, Track } from 'components'; import Filter from './Filter'; @@ -180,7 +179,7 @@ const DataTable: FC = ( )} {flexRender(header.column.columnDef.header, header.getContext())} - {filterable && header.column.getCanFilter() && ( + {dropdownFilters && header.column.getCanFilter() && ( (() => { const dropdownConfig = dropdownFilters?.find( (df) => df.columnId === header.column.id @@ -196,9 +195,11 @@ const DataTable: FC = ( /> ); } - return ; + })() )} + {filterable && header.column.getCanFilter() && ( + )} )} @@ -265,4 +266,4 @@ const DataTable: FC = ( ); }; -export default DataTable; +export default DataTable; \ No newline at end of file diff --git a/GUI/src/components/molecules/DataModelCard/index.tsx b/GUI/src/components/molecules/DataModelCard/index.tsx index 2d4daa76..0aa795ce 100644 --- a/GUI/src/components/molecules/DataModelCard/index.tsx +++ b/GUI/src/components/molecules/DataModelCard/index.tsx @@ -202,7 +202,7 @@ const DataModelCard: FC> = ({ }} > - {t('datasetGroups.datasetCard.settings') ?? ''} + {t('datasets.datasetCard.settings') ?? ''}
    @@ -210,4 +210,4 @@ const DataModelCard: FC> = ({ ); }; -export default DataModelCard; +export default DataModelCard; \ No newline at end of file diff --git a/GUI/src/pages/ViewDataset/index.tsx b/GUI/src/pages/ViewDataset/index.tsx index c526b142..88f28faa 100644 --- a/GUI/src/pages/ViewDataset/index.tsx +++ b/GUI/src/pages/ViewDataset/index.tsx @@ -18,6 +18,7 @@ import { getDatasetData, getDatasetMetadata } from 'services/datasets'; import { useQuery } from '@tanstack/react-query'; import { useDialog } from 'hooks/useDialog'; import { fetchAllAgencies } from 'services/agencies'; +import NoDataView from 'components/molecules/NoDataView'; const ViewDataset = () => { const { t } = useTranslation(); @@ -206,7 +207,7 @@ const ViewDataset = () => { {t('datasets.detailedView.connectedModels') ?? ''} : N/A

-          {t('datasets.detailedView.noOfItems') ?? ''} : {dataset?.length ?? 0}
+          {t('datasets.detailedView.noOfItems') ?? ''} : {20}

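The hunk above swaps the computed dataset?.length for a hardcoded 20 because rows are now fetched one page at a time, so the length of the loaded slice no longer reflects the whole dataset. A hedged sketch of deriving the figure from the already-imported getDatasetMetadata instead; the numOfItems field name is an assumption, not something this patch shows:

// Sketch only: assumes the metadata payload carries a total-count field.
type DatasetMetadata = { numOfItems?: number }; // field name assumed

function displayedItemCount(metadata: DatasetMetadata | undefined): number {
  // Prefer the server-side total; fall back to 0 while metadata is loading.
  return metadata?.numOfItems ?? 0;
}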
@@ -222,12 +223,11 @@ const ViewDataset = () => {
         )}
    {isLoading && } - {!isLoading && updatedDataset && updatedDataset.length > 0 && ( + {!isLoading && updatedDataset && updatedDataset?.length > 0 && ( []} pagination={pagination} - filterable dropdownFilters={[ { columnId: 'clientName', @@ -244,7 +244,8 @@ const ViewDataset = () => { setPagination({ pageIndex: 0, pageSize: 5, - }) + }); + setUpdatedDataset([]); }} setPagination={(state: PaginationState) => { if ( @@ -254,10 +255,15 @@ const ViewDataset = () => { return; setPagination(state); }} - pagesCount={10} + pagesCount={4} isClientSide={false} /> )} + { + updatedDataset?.length === 0 && ( + + ) + }
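The onSelect handler above resets the page index and clears the previously loaded rows, so a stale page is never shown while the newly selected agency's first page is fetched. The same pattern in isolation (state names mirror this diff; the refetch itself is assumed to be triggered elsewhere by the pagination state change):

import { useState } from 'react';

type PaginationState = { pageIndex: number; pageSize: number };

// Minimal reproduction of the filter-change reset used in ViewDataset.
function useAgencyFilter() {
  const [selectedAgencyId, setSelectedAgencyId] = useState<string>('all');
  const [pagination, setPagination] = useState<PaginationState>({
    pageIndex: 0,
    pageSize: 5,
  });
  const [rows, setRows] = useState<unknown[]>([]);

  const onAgencyChange = (value: string) => {
    setSelectedAgencyId(value);
    setPagination({ pageIndex: 0, pageSize: 5 }); // jump back to the first page
    setRows([]); // drop stale rows so the no-data view can show while refetching
  };

  return { selectedAgencyId, pagination, setPagination, rows, setRows, onAgencyChange };
}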
  );
-  const dataColumns = generateDynamicColumns(["id", "question", "clientName"], editView, deleteView);
+  const dataColumns = useMemo(
+    () => generateDynamicColumns(["id", "question", "clientName"], editView, deleteView),
+    [editView, deleteView]
+  );

   const editDataRecord = (dataRow: SelectedRowPayload) => {
     const originalRow = dataset?.find(

From d6f0294478f2839e9d1decfd169f49e3c2b448c0 Mon Sep 17 00:00:00 2001
From: erangi-ar
Date: Mon, 30 Jun 2025 21:43:14 +0530
Subject: [PATCH 060/195] feat: create data model UI and integration

---
 .../hbs/format_dataset_versions.handlebars   |   5 +
 .../POST/get-all-dataset-versions.sql        |   3 +
 .../GET/datasets/versions.yml                |  31 ++
 GUI/src/App.tsx                              |   3 +-
 .../molecules/DataModelForm/index.tsx        | 133 +++++++
 .../pages/DataModels/ConfigureDataModel.tsx  | 361 ++++++++++++++++++
 GUI/src/pages/DataModels/CreateDataModel.tsx | 110 ++++++
 GUI/src/pages/DataModels/index.tsx           |   1 -
 GUI/src/services/datasets.ts                 |   5 +
 GUI/src/types/dataModels.ts                  |   9 +-
 GUI/src/utils/commonUtilts.ts                |  11 +
 GUI/src/utils/endpoints.ts                   |   2 +-
 GUI/src/utils/queryKeys.ts                   |   2 +-
 GUI/translations/en/common.json              |   8 +-
 14 files changed, 672 insertions(+), 12 deletions(-)
 create mode 100644 DSL/DMapper/global-classifier/hbs/format_dataset_versions.handlebars
 create mode 100644 DSL/Resql/global-classifier/POST/get-all-dataset-versions.sql
 create mode 100644 DSL/Ruuter.private/global-classifier/GET/datasets/versions.yml
 create mode 100644 GUI/src/components/molecules/DataModelForm/index.tsx
 create mode 100644 GUI/src/pages/DataModels/ConfigureDataModel.tsx
 create mode 100644 GUI/src/pages/DataModels/CreateDataModel.tsx

diff --git a/DSL/DMapper/global-classifier/hbs/format_dataset_versions.handlebars b/DSL/DMapper/global-classifier/hbs/format_dataset_versions.handlebars
new file mode 100644
index 00000000..445d532a
--- /dev/null
+++ b/DSL/DMapper/global-classifier/hbs/format_dataset_versions.handlebars
@@ -0,0 +1,5 @@
+{{#each datasets}}
+  {{#if @first}}[{{/if}}
+  {"id":{{id}},"version":"V{{major}}.{{minor}}"}{{#unless @last}},{{/unless}}
+  {{#if @last}}]{{/if}}
+{{/each}}
\ No newline at end of file
diff --git a/DSL/Resql/global-classifier/POST/get-all-dataset-versions.sql b/DSL/Resql/global-classifier/POST/get-all-dataset-versions.sql
new file mode 100644
index 00000000..66430eb4
--- /dev/null
+++ b/DSL/Resql/global-classifier/POST/get-all-dataset-versions.sql
@@ -0,0 +1,3 @@
+SELECT id, major, minor
+FROM public.datasets
+ORDER BY id;
\ No newline at end of file
diff --git a/DSL/Ruuter.private/global-classifier/GET/datasets/versions.yml b/DSL/Ruuter.private/global-classifier/GET/datasets/versions.yml
new file mode 100644
index 00000000..c6741634
--- /dev/null
+++ b/DSL/Ruuter.private/global-classifier/GET/datasets/versions.yml
@@ -0,0 +1,31 @@
+declaration:
+  call: declare
+  version: 0.1
+  description: "Get dataset versions as [id, version] array"
+  method: get
+  accepts: json
+  returns: json
+  namespace: global-classifier
+
+getAllDatasetVersions:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_RESQL]/get-all-dataset-versions"
+  result: datasets_res
+  next: format_versions
+
+format_versions:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_DMAPPER]/hbs/global-classifier/format_dataset_versions"
+    headers:
+      type: json
+    body:
+      datasets: ${datasets_res.response.body}
+  result: formatted_versions
+  next: return_result
+
+return_result:
+  return: ${formatted_versions.response.body}
+  status: 200
+  next: end
\ No newline at end of file
diff --git a/GUI/src/App.tsx
b/GUI/src/App.tsx index 51434ca0..1c85b02c 100644 --- a/GUI/src/App.tsx +++ b/GUI/src/App.tsx @@ -13,6 +13,7 @@ import IntegratedAgencies from 'pages/IntegratedAgencies'; import Datasets from 'pages/Datasets'; import ViewDataset from 'pages/ViewDataset'; import DataModels from 'pages/DataModels'; +import CreateDataModel from 'pages/DataModels/CreateDataModel'; const App: FC = () => { const navigate = useNavigate(); @@ -64,7 +65,7 @@ const App: FC = () => { } /> } /> } /> - + } /> )} diff --git a/GUI/src/components/molecules/DataModelForm/index.tsx b/GUI/src/components/molecules/DataModelForm/index.tsx new file mode 100644 index 00000000..63adfcad --- /dev/null +++ b/GUI/src/components/molecules/DataModelForm/index.tsx @@ -0,0 +1,133 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { + FormCheckboxes, + FormInput, + FormRadios, + FormSelect, + Label, +} from 'components'; +import { formattedArray, toLabelValueArray } from 'utils/commonUtilts'; +import { useQuery } from '@tanstack/react-query'; +import CircularSpinner from '../CircularSpinner/CircularSpinner'; +import { DataModel } from 'types/dataModels'; +import { dataModelsQueryKeys, datasetQueryKeys } from 'utils/queryKeys'; +import { getDeploymentEnvironments } from 'services/datamodels'; +import { getAllDatasetVersions } from 'services/datasets'; + +type DataModelFormType = { + dataModel: any; + handleChange: (name: keyof DataModel, value: any) => void; + errors?: Record; + type: string; +}; + +const DataModelForm: FC = ({ + dataModel, + handleChange, + errors, + type, +}) => { + const { t } = useTranslation(); + const { data: deploymentEnvironmentsData } = useQuery({ + queryKey: datasetQueryKeys.DATASET_VERSIONS(), + queryFn: () => getDeploymentEnvironments(), + }); + + const { data: datasetVersions } = useQuery({ + queryKey: dataModelsQueryKeys.DATA_MODEL_DEPLOYMENT_ENVIRONMENTS(), + queryFn: () => getAllDatasetVersions(), + }); + + return ( +
    + {type === 'create' ? ( +
    +
    + handleChange('modelName', e.target.value)} + error={errors?.modelName} + /> +
    +
    + {t('dataModels.dataModelForm.modelVersion')}{' '} + +
    +
    + ) : ( +
    +
    {dataModel.modelName}
    + +
    + )} + + {((type === 'configure') || type === 'create') + ? ( +
    +
    + {t('dataModels.dataModelForm.datasetGroup')}{' '} +
    +
    + { + handleChange('datasetId', selection?.value); + }} + value={dataModel?.datasetId === null && t('dataModels.dataModelForm.errors.datasetVersionNotExist')} + defaultValue={dataModel?.datasetId ? dataModel?.datasetId : t('dataModels.dataModelForm.errors.datasetVersionNotExist')} + error={errors?.datasetId} + /> +
    + {(type === 'configure') && !dataModel.datasetId && {t('dataModels.dataModelForm.errors.datasetVersionNotExist')}} +
    +
    + +
    + {t('dataModels.dataModelForm.baseModels')}{' '} +
    +
    + + handleChange('baseModels', values.baseModels) + } + error={errors?.baseModels} + selectedValues={dataModel?.baseModels} + /> +
    + +
    + {t('dataModels.dataModelForm.deploymentPlatform')}{' '} +
    +
    + handleChange('deploymentEnvironment', value)} + error={errors?.deploymentEnvironment} + selectedValue={dataModel?.deploymentEnvironment} + /> +
    +
    + ) : ( + + )} +
    + ); +}; + +export default DataModelForm; diff --git a/GUI/src/pages/DataModels/ConfigureDataModel.tsx b/GUI/src/pages/DataModels/ConfigureDataModel.tsx new file mode 100644 index 00000000..21a7b595 --- /dev/null +++ b/GUI/src/pages/DataModels/ConfigureDataModel.tsx @@ -0,0 +1,361 @@ +import { FC, useRef, useState } from 'react'; +import { useMutation, useQuery } from '@tanstack/react-query'; +import { Link, useNavigate } from 'react-router-dom'; +import { Button, Card, Dialog } from 'components'; +import { useDialog } from 'hooks/useDialog'; +import BackArrowButton from 'assets/BackArrowButton'; +import { + deleteDataModel, + getMetadata, + retrainDataModel, + updateDataModel, +} from 'services/data-models'; +import DataModelForm from 'components/molecules/DataModelForm'; +import { getChangedAttributes } from 'utils/dataModelsUtils'; +import { Platform, UpdateType } from 'enums/dataModelsEnums'; +import { ButtonAppearanceTypes } from 'enums/commonEnums'; +import CircularSpinner from 'components/molecules/CircularSpinner/CircularSpinner'; +import { DataModel, UpdatedDataModelPayload } from 'types/dataModels'; +import { dataModelsQueryKeys } from 'utils/queryKeys'; +import { useTranslation } from 'react-i18next'; +import './DataModels.scss'; + +type ConfigureDataModelType = { + id: number; + availableProdModels?: string[]; +}; + +const ConfigureDataModel: FC = ({ + id, + availableProdModels, +}) => { + const { t } = useTranslation(); + const { open, close } = useDialog(); + const navigate = useNavigate(); + const [enabled, setEnabled] = useState(true); + const [initialData, setInitialData] = useState>({ + modelName: '', + dgId: 0, + platform: '', + baseModels: [], + maturity: '', + version: '', + }); + const [dataModel, setDataModel] = useState({ + modelId: 0, + modelName: '', + dgId: 0, + platform: '', + baseModels: [], + maturity: '', + version: '', + }); + const [modalOpen, setModalOpen] = useState(false); + const [modalType, setModalType] = useState(''); + const [modalTitle, setModalTitle] = useState(''); + const [modalDiscription, setModalDiscription] = useState(''); + const modalFunciton = useRef(() => { }); + const { isLoading } = useQuery( + dataModelsQueryKeys.GET_META_DATA(id), + () => getMetadata(id), + { + enabled, + onSuccess: (data) => { + setDataModel({ + modelId: data?.modelId || 0, + modelName: data?.modelName || '', + dgId: data?.connectedDgId || 0, + platform: data?.deploymentEnv || '', + baseModels: data?.baseModels || [], + maturity: data?.maturityLabel || '', + version: `V${data?.majorVersion}.${data?.minorVersion}`, + }); + setInitialData({ + modelName: data?.modelName || '', + dgId: data?.connectedDgId || 0, + platform: data?.deploymentEnv || '', + baseModels: data?.baseModels || [], + maturity: data?.maturityLabel || '', + version: `V${data?.majorVersion}.${data?.minorVersion}`, + }); + setEnabled(false); + }, + } + ); + + const handleDataModelAttributesChange = ( + name: keyof DataModel, + value: any + ) => { + setDataModel((prevDataModel) => ({ + ...prevDataModel, + [name]: value, + })); + }; + + const handleSave = () => { + const payload = getChangedAttributes(initialData, dataModel); + let updateType: string | undefined; + if (payload.dgId) { + updateType = UpdateType.MAJOR; + } else if (payload.baseModels || payload.platform) { + updateType = UpdateType.MINOR; + } else if (payload.maturity) { + updateType = UpdateType.MATURITY_LABEL; + } + + const updatedPayload = { + modelId: dataModel.modelId, + connectedDgId: payload.dgId, + deploymentEnv: 
payload.platform, + baseModels: payload.baseModels, + maturityLabel: payload.maturity, + updateType: updateType, + }; + + if (updateType) { + if (availableProdModels?.includes(dataModel.platform)) { + openModal( + t('dataModels.createDataModel.replaceDesc'), + t('dataModels.createDataModel.replaceTitle'), + () => updateDataModelMutation.mutate(updatedPayload), + 'replace' + ); + } else { + updateDataModelMutation.mutate(updatedPayload); + } + } + }; + + const updateDataModelMutation = useMutation({ + mutationFn: (data: UpdatedDataModelPayload) => updateDataModel(data), + onSuccess: async () => { + open({ + title: t('dataModels.configureDataModel.saveChangesTitile'), + content:

    {t('dataModels.configureDataModel.saveChangesDesc')}

    , + footer: ( +
    + {' '} + +
    + ), + }); + }, + onError: () => { + open({ + title: t('dataModels.configureDataModel.updateErrorTitile'), + content:

    {t('dataModels.configureDataModel.updateErrorDesc')}

    , + }); + }, + }); + + const handleDelete = () => { + if ( + dataModel.platform === Platform.JIRA || + dataModel.platform === Platform.OUTLOOK + ) { + open({ + title: t('dataModels.configureDataModel.deleteErrorTitle'), + content:

    {t('dataModels.configureDataModel.deleteErrorDesc')}

    , + footer: ( +
    + +
    + ), + }); + } else { + openModal( + t('dataModels.configureDataModel.deleteConfirmationDesc'), + t('dataModels.configureDataModel.deleteConfirmation'), + () => deleteDataModelMutation.mutate(dataModel.modelId), + 'delete' + ); + + } + }; + + const deleteDataModelMutation = useMutation({ + mutationFn: (modelId: number) => deleteDataModel(modelId), + onSuccess: async (response) => { + close(); + navigate(0); + }, + onError: () => { + open({ + title: t('dataModels.configureDataModel.deleteModalErrorTitle'), + content: ( +

    {t('dataModels.configureDataModel.deleteModalErrorDesc')}

    + ), + }); + }, + }); + + const retrainDataModelMutation = useMutation({ + mutationFn: (modelId: number) => retrainDataModel(modelId), + onSuccess: async () => { + close(); + navigate(0); + setModalOpen(false) + }, + onError: () => { + open({ + title: t('dataModels.configureDataModel.retrainDataModalErrorTitle'), + content: ( +

    {t('dataModels.configureDataModel.retrainDataModalErrorDesc')}

    + ), + }); + }, + }); + + const openModal = ( + content: string, + title: string, + onConfirm: () => void, + modalType: string + ) => { + setModalOpen(true); + setModalType(modalType); + setModalDiscription(content); + setModalTitle(title); + modalFunciton.current = onConfirm; + }; + return ( +
    +
    +
    + navigate(0)}> + + +
    + {t('dataModels.configureDataModel.title')} +
    +
    + + +
    +
    +

    {t('dataModels.configureDataModel.retrainCard')}

    + +
    +
    +
    + + {isLoading ? ( + + ) : ( + + )} +
    +
    + + + +
    + + setModalOpen(false)} + isOpen={modalOpen} + title={modalTitle} + footer={ +
    + + {modalType === 'retrain' ? ( + + ) : modalType === 'delete' ? ( + + ) : ( + + )} +
    + } + > +
    {modalDiscription}
    +
    +
    + ); +}; + +export default ConfigureDataModel; \ No newline at end of file diff --git a/GUI/src/pages/DataModels/CreateDataModel.tsx b/GUI/src/pages/DataModels/CreateDataModel.tsx new file mode 100644 index 00000000..9f79c6eb --- /dev/null +++ b/GUI/src/pages/DataModels/CreateDataModel.tsx @@ -0,0 +1,110 @@ +import { FC, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Button } from 'components'; +import { Link, useNavigate } from 'react-router-dom'; +import './DataModels.scss'; +import { useMutation, useQuery } from '@tanstack/react-query'; +import { useDialog } from 'hooks/useDialog'; +import BackArrowButton from 'assets/BackArrowButton'; +import DataModelForm from 'components/molecules/DataModelForm'; +import { ButtonAppearanceTypes } from 'enums/commonEnums'; +import { + CreateDataModelPayload, + DataModel, + ErrorsType, +} from 'types/dataModels'; +import { da } from 'date-fns/locale'; + +const CreateDataModel: FC = () => { + const { t } = useTranslation(); + const { open, close } = useDialog(); + const navigate = useNavigate(); + const [availableProdModels, setAvailableProdModels] = useState([]); + + const [dataModel, setDataModel] = useState>({ + modelName: '', + datasetId: 0, + baseModels: [], + deploymentEnvironment: '', + version: 'V1.0', + }); + + const handleDataModelAttributesChange = (name: string, value: string) => { + setDataModel((prevFilters) => ({ + ...prevFilters, + [name]: value, + })); + + setErrors((prevErrors) => { + const updatedErrors = { ...prevErrors }; + + if (name === 'modelName' && value !== '') { + delete updatedErrors.modelName; + } + if (name === 'baseModels' && value !== '') { + delete updatedErrors.baseModels; + } + if (name === 'deploymentEnvironment' && value !== '') { + delete updatedErrors.deploymentEnvironment; + } + if (name === 'datasetId') { + delete updatedErrors.datasetId; + } + + return updatedErrors; + }); + }; + + const [errors, setErrors] = useState({ + modelName: '', + datasetId: '', + baseModels: '', + deploymentEnvironment: '', + }); + + const handleCreate = () => { + console.log(dataModel); + + }; + +const isCreateDisabled = () => { + return ( + !dataModel.modelName || + !dataModel.datasetId || + !dataModel.baseModels || + (Array.isArray(dataModel.baseModels) && dataModel.baseModels.length === 0) || + !dataModel.deploymentEnvironment + ); +}; + + return ( +
    +
    +
    +
    + + + +
    {t('dataModels.createDataModel.title')}
    +
    +
    + +
    +
    + + +
    +
    + ); +}; + +export default CreateDataModel; diff --git a/GUI/src/pages/DataModels/index.tsx b/GUI/src/pages/DataModels/index.tsx index e1259d6c..da600615 100644 --- a/GUI/src/pages/DataModels/index.tsx +++ b/GUI/src/pages/DataModels/index.tsx @@ -47,7 +47,6 @@ const DataModels: FC = () => { }); const pageCount = dataModelsData?.[0]?.totalPages || 0; -console.log(deploymentEnvironmentsData); const handleFilterChange = ( name: keyof DataModelsFilters, diff --git a/GUI/src/services/datasets.ts b/GUI/src/services/datasets.ts index 6048c56e..3617225c 100644 --- a/GUI/src/services/datasets.ts +++ b/GUI/src/services/datasets.ts @@ -40,4 +40,9 @@ export async function getDatasetData( }, }); return data?.response?.data ?? []; +} + +export async function getAllDatasetVersions() { + const { data } = await apiDev.get(datasetsEndpoints.GET_ALL_DATASET_VERSIONS()); + return data?.response ?? []; } \ No newline at end of file diff --git a/GUI/src/types/dataModels.ts b/GUI/src/types/dataModels.ts index be8a6d43..87a077b1 100644 --- a/GUI/src/types/dataModels.ts +++ b/GUI/src/types/dataModels.ts @@ -2,10 +2,10 @@ export type DataModel = { modelId: number; modelName: string; dgName?: string; - dgId: string | number; + datasetId: string | number; platform: string; baseModels: string[]; - maturity: string; + deploymentEnvironment: string; version?: string; }; @@ -86,8 +86,7 @@ export type DataModelsFilters = { export type ErrorsType = { modelName?: string; dgName?: string; - platform?: string; + deploymentEnvironment?: string; baseModels?: string; - maturity?: string; - dgId?: string; + datasetId?: string; }; \ No newline at end of file diff --git a/GUI/src/utils/commonUtilts.ts b/GUI/src/utils/commonUtilts.ts index c59bde85..82cd35b9 100644 --- a/GUI/src/utils/commonUtilts.ts +++ b/GUI/src/utils/commonUtilts.ts @@ -15,6 +15,17 @@ export const formattedArray = (data: string[]|undefined): FormattedOption[]|unde })); }; +export const toLabelValueArray = ( + data: T[] | undefined, + valueField: keyof T, + labelField: keyof T +): { label: string; value: string }[] | undefined => { + return data?.map((item) => ({ + label: String(item[labelField]), + value: String(item[valueField]), + })); +}; + export const convertTimestampToDateTime = (timestamp: number) => { return moment.unix(timestamp).format('YYYY-MM-DD HH:mm:ss'); diff --git a/GUI/src/utils/endpoints.ts b/GUI/src/utils/endpoints.ts index 462e6db6..0eaa5cb5 100644 --- a/GUI/src/utils/endpoints.ts +++ b/GUI/src/utils/endpoints.ts @@ -18,7 +18,7 @@ export const datasetsEndpoints = { GET_OVERVIEW: (): string => '/global-classifier/datasets/list', GET_METADATA: (): string => `/global-classifier/datasets/metadata`, GET_DATASETS_DATA: (): string => '/global-classifier/datasets/overview', - + GET_ALL_DATASET_VERSIONS: (): string => '/global-classifier/datasets/versions', GET_DATASET_FILTERS: (): string => diff --git a/GUI/src/utils/queryKeys.ts b/GUI/src/utils/queryKeys.ts index ef1c76bb..c4ffcc04 100644 --- a/GUI/src/utils/queryKeys.ts +++ b/GUI/src/utils/queryKeys.ts @@ -25,6 +25,7 @@ export const integratedAgenciesQueryKeys = { export const datasetQueryKeys = { DATASET_FILTERS: (): string[] => ['datasets/filters'], + DATASET_VERSIONS: (): string[] => ['datasets/versions'], DATASET_OVERVIEW: function ( pageIndex?: number, generationStatus?: string, @@ -63,7 +64,6 @@ export const authQueryKeys = { export const dataModelsQueryKeys = { DATA_MODEL_FILTERS: (): string[] => ['datamodels/filters'], DATA_MODEL_DEPLOYMENT_ENVIRONMENTS: (): string[] => 
    ['datamodels/deployment-environments'],
-
   DATA_MODELS_OVERVIEW: function (
     pageIndex?: number,
     modelStatus?:string,
diff --git a/GUI/translations/en/common.json b/GUI/translations/en/common.json
index 7a019488..09e70c92 100644
--- a/GUI/translations/en/common.json
+++ b/GUI/translations/en/common.json
@@ -402,10 +402,12 @@
     },
     "dataModelForm": {
       "modelVersion": "Model Version",
-      "datasetGroup": "Select Dataset Group",
+      "datasetGroup": "Select Dataset Version",
       "baseModels": "Select Base Models",
-      "deploymentPlatform": "Select Deployment Platform",
-      "maturityLabel": "Select Maturity Label"
+      "deploymentPlatform": "Select Deployment Environment",
+      "errors":{
+        "datasetVersionNotExist":"Dataset version does not exist"
+      }
     }
   },
   "trainingSessions": {

From e69c8ab3b7403b7e0000ba88abea2260c64e271a Mon Sep 17 00:00:00 2001
From: nuwangeek
Date: Tue, 1 Jul 2025 09:15:43 +0530
Subject: [PATCH 061/195] changed schemas.py

---
 minio_presigned_urls.txt                   | 10 ---
 minio_signed_urls.py                       | 65 -------------------
 .../chunks_handler_api.py                  | 10 +--
 src/dataset_file_hanlder/models/schemas.py | 10 +--
 4 files changed, 10 insertions(+), 85 deletions(-)
 delete mode 100644 minio_presigned_urls.txt
 delete mode 100644 minio_signed_urls.py

diff --git a/minio_presigned_urls.txt b/minio_presigned_urls.txt
deleted file mode 100644
index ac9ad656..00000000
--- a/minio_presigned_urls.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-http://minio:9000/ckb/agencies/Politsei-_ja_Piirivalveamet/Politsei-_ja_Piirivalveamet.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250624%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250624T091734Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=94db13ede49dd83ec7007477084e784eef7affebe55581f69e8ee0c923b6bdd1|||http://minio:9000/ckb/agencies/ID.ee/ID.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250624%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250624T091734Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=3cd531c069dfe1a03df04e7d4cdab185d7aadbfedfc729db28be73e0a35c60fd
-
-Individual URLs:
-==================================================
-URL 1:
-http://minio:9000/ckb/agencies/Politsei-_ja_Piirivalveamet/Politsei-_ja_Piirivalveamet.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250624%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250624T091734Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=94db13ede49dd83ec7007477084e784eef7affebe55581f69e8ee0c923b6bdd1
-
-URL 2:
-http://minio:9000/ckb/agencies/ID.ee/ID.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250624%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250624T091734Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=3cd531c069dfe1a03df04e7d4cdab185d7aadbfedfc729db28be73e0a35c60fd
-
diff --git a/minio_signed_urls.py b/minio_signed_urls.py
deleted file mode 100644
index 26395082..00000000
--- a/minio_signed_urls.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import boto3
-from botocore.client import Config
-
-# Create S3 client for MinIO
-s3_client = boto3.client(
-    's3',
-    endpoint_url='http://minio:9000',  # Replace with your MinIO URL
-    aws_access_key_id='minioadmin',  # Replace with your access key
-    aws_secret_access_key='minioadmin',  # Replace with your secret key
-    config=Config(signature_version='s3v4'),  # Hardcoded signature version
-    region_name='us-east-1'  # MinIO usually works with any region
-)
-
-# List of files to generate URLs for
-files_to_process = [
-    {'bucket': 'ckb', 'key': 
'agencies/Politsei-_ja_Piirivalveamet/Politsei-_ja_Piirivalveamet.zip'}, - {'bucket': 'ckb', 'key': 'agencies/ID.ee/ID.zip'}, - # Add more files as needed - # {'bucket': 'ckb', 'key': 'agencies/another-agency/file.zip'}, -] - -# Generate presigned URLs -presigned_urls = [] - -print("Generating presigned URLs...") -for file_info in files_to_process: - try: - url = s3_client.generate_presigned_url( - ClientMethod='get_object', - Params={'Bucket': file_info['bucket'], 'Key': file_info['key']}, - ExpiresIn=24 * 3600 # 4 hours in seconds - ) - presigned_urls.append(url) - print(f"✅ Generated URL for: {file_info['key']}") - print(f" URL: {url}") - except Exception as e: - print(f"❌ Failed to generate URL for: {file_info['key']}") - print(f" Error: {str(e)}") - -output_file = 'minio_presigned_urls.txt' - -try: - with open(output_file, 'w') as f: - # Write URLs separated by ||| delimiter (for your script) - url_string = '|||'.join(presigned_urls) - f.write(url_string) - f.write('\n\n') - - # Also write each URL on separate lines for readability - f.write("Individual URLs:\n") - f.write("=" * 50 + "\n") - for i, url in enumerate(presigned_urls, 1): - f.write(f"URL {i}:\n{url}\n\n") - - print(f"\n✅ Presigned URLs saved to: {output_file}") - print(f"Total URLs generated: {len(presigned_urls)}") - - # Display the combined URL string for easy copying - if presigned_urls: - print("\nCombined URL string (for signedUrls environment variable):") - print("=" * 60) - print('|||'.join(presigned_urls)) - -except Exception as e: - print(f"❌ Failed to save URLs to file: {str(e)}") \ No newline at end of file diff --git a/src/dataset_file_hanlder/chunks_handler_api.py b/src/dataset_file_hanlder/chunks_handler_api.py index 3df10f2f..82d3924d 100644 --- a/src/dataset_file_hanlder/chunks_handler_api.py +++ b/src/dataset_file_hanlder/chunks_handler_api.py @@ -36,9 +36,9 @@ async def download_chunk(request: ChunkDownloadRequest): Chunk data or error information """ try: - logger.info(f"Chunk download request - Dataset: {request.dataset_id}, Page: {request.page_num}") + logger.info(f"Chunk download request - Dataset: {request.datasetId}, Page: {request.pageNum}") - result = chunk_service.download_chunk_from_s3(request.dataset_id, request.page_num) + result = chunk_service.download_chunk_from_s3(request.datasetId, request.pageNum) if not result["success"]: raise HTTPException(status_code=400, detail=result) @@ -64,12 +64,12 @@ async def download_multiple_chunks(request: MultiChunkDownloadRequest): Aggregated chunk data or error information """ try: - logger.info(f"Multi-chunk download request - Dataset: {request.dataset_id}, Chunks: {request.chunk_ids}") + logger.info(f"Multi-chunk download request - Dataset: {request.datasetId}, Chunks: {request.chunkIds}") - if not request.chunk_ids: + if not request.chunkIds: raise HTTPException(status_code=400, detail="No chunk IDs provided") - result = multi_chunk_service.download_multiple_chunks(request.dataset_id, request.chunk_ids) + result = multi_chunk_service.download_multiple_chunks(request.datasetId, request.chunkIds) if not result["success"]: raise HTTPException(status_code=400, detail=result) diff --git a/src/dataset_file_hanlder/models/schemas.py b/src/dataset_file_hanlder/models/schemas.py index c31722ce..e91f35b8 100644 --- a/src/dataset_file_hanlder/models/schemas.py +++ b/src/dataset_file_hanlder/models/schemas.py @@ -37,14 +37,14 @@ class DownloadResponse(BaseModel): class ChunkDownloadRequest(BaseModel): """Request model for downloading a single chunk.""" - 
dataset_id: str - page_num: int + datasetId: str + pageNum: int class MultiChunkDownloadRequest(BaseModel): """Request model for downloading multiple chunks.""" - dataset_id: str - chunk_ids: List[int] + datasetId: str + chunkIds: List[int] class ChunkDownloadResponse(BaseModel): @@ -66,4 +66,4 @@ class MultiChunkDownloadResponse(BaseModel): download_summary: Dict[str, Any] download_details: Optional[List[Dict[str, Any]]] = None error: Optional[str] = None - message: str + message: str \ No newline at end of file From 043c8901ea2b31364af4a4adcacaa061efd52a36 Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Tue, 1 Jul 2025 09:24:41 +0530 Subject: [PATCH 062/195] fixed merge conflicts --- docker-compose.yml | 100 ++++++++++++++++++++++----------------------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 3d5e78e7..6c9b5ee0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -58,22 +58,22 @@ services: networks: - bykstack - # tim: - # container_name: tim - # image: tim - # depends_on: - # - tim-postgresql - # environment: - # - SECURITY_ALLOWLIST_JWT=ruuter-private,ruuter-public,data-mapper,resql,tim,tim-postgresql,chat-widget,authentication-layer,127.0.0.1,::1 - # - KEY_PASS=ppjjpp - # ports: - # - 8085:8085 - # networks: - # - bykstack - # extra_hosts: - # - "host.docker.internal:host-gateway" - # cpus: "0.5" - # mem_limit: "512M" + tim: + container_name: tim + image: tim + depends_on: + - tim-postgresql + environment: + - SECURITY_ALLOWLIST_JWT=ruuter-private,ruuter-public,data-mapper,resql,tim,tim-postgresql,chat-widget,authentication-layer,127.0.0.1,::1 + - KEY_PASS=ppjjpp + ports: + - 8085:8085 + networks: + - bykstack + extra_hosts: + - "host.docker.internal:host-gateway" + cpus: "0.5" + mem_limit: "512M" tim-postgresql: container_name: tim-postgresql @@ -90,13 +90,13 @@ services: networks: - bykstack - # authentication-layer: - # container_name: authentication-layer - # image: authentication-layer - # ports: - # - 3004:3004 - # networks: - # - bykstack + authentication-layer: + container_name: authentication-layer + image: authentication-layer + ports: + - 3004:3004 + networks: + - bykstack resql: container_name: resql @@ -305,34 +305,34 @@ services: retries: 3 restart: unless-stopped - # gui: - # container_name: gui - # environment: - # - NODE_ENV=local - # - REACT_APP_RUUTER_API_URL=http://localhost:8086 - # - REACT_APP_RUUTER_PRIVATE_API_URL=http://localhost:8088 - # - REACT_APP_EXTERNAL_API_URL=http://localhost:8000 - # - REACT_APP_CUSTOMER_SERVICE_LOGIN=http://localhost:3004/et/dev-auth - # - REACT_APP_NOTIFICATION_NODE_URL=http://localhost:4040 - # - REACT_APP_CSP=upgrade-insecure-requests; default-src 'self'; font-src 'self' data:; img-src 'self' data:; script-src 'self' 'unsafe-eval' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; object-src 'none'; connect-src 'self' http://localhost:8086 http://localhost:8088 http://localhost:8085 http://localhost:4040 http://localhost:3001 http://localhost:8000; - # - DEBUG_ENABLED=true - # - CHOKIDAR_USEPOLLING=true - # - PORT=3001 - # - REACT_APP_SERVICE_ID=conversations,settings,monitoring - # - REACT_APP_ENABLE_HIDDEN_FEATURES=TRUE + gui: + container_name: gui + environment: + - NODE_ENV=local + - REACT_APP_RUUTER_API_URL=http://localhost:8086 + - REACT_APP_RUUTER_PRIVATE_API_URL=http://localhost:8088 + - REACT_APP_EXTERNAL_API_URL=http://localhost:8000 + - REACT_APP_CUSTOMER_SERVICE_LOGIN=http://localhost:3004/et/dev-auth + - 
REACT_APP_NOTIFICATION_NODE_URL=http://localhost:4040 + - REACT_APP_CSP=upgrade-insecure-requests; default-src 'self'; font-src 'self' data:; img-src 'self' data:; script-src 'self' 'unsafe-eval' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; object-src 'none'; connect-src 'self' http://localhost:8086 http://localhost:8088 http://localhost:8085 http://localhost:4040 http://localhost:3001 http://localhost:8000; + - DEBUG_ENABLED=true + - CHOKIDAR_USEPOLLING=true + - PORT=3001 + - REACT_APP_SERVICE_ID=conversations,settings,monitoring + - REACT_APP_ENABLE_HIDDEN_FEATURES=TRUE - # build: - # context: ./GUI - # dockerfile: Dockerfile.dev - # ports: - # - 3003:3001 - # volumes: - # - /app/node_modules - # - ./GUI:/app - # networks: - # - bykstack - # cpus: "0.5" - # mem_limit: "1G" + build: + context: ./GUI + dockerfile: Dockerfile.dev + ports: + - 3003:3001 + volumes: + - /app/node_modules + - ./GUI:/app + networks: + - bykstack + cpus: "0.5" + mem_limit: "1G" volumes: shared-volume: From e50b24462d09a5136f8286a5c7fae6f631750676 Mon Sep 17 00:00:00 2001 From: erangi-ar <111747955+erangi-ar@users.noreply.github.com> Date: Tue, 1 Jul 2025 17:18:17 +0530 Subject: [PATCH 063/195] Update GUI/src/pages/DataModels/ConfigureDataModel.tsx Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- GUI/src/pages/DataModels/ConfigureDataModel.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GUI/src/pages/DataModels/ConfigureDataModel.tsx b/GUI/src/pages/DataModels/ConfigureDataModel.tsx index 21a7b595..3cd88218 100644 --- a/GUI/src/pages/DataModels/ConfigureDataModel.tsx +++ b/GUI/src/pages/DataModels/ConfigureDataModel.tsx @@ -35,7 +35,7 @@ const ConfigureDataModel: FC = ({ const [enabled, setEnabled] = useState(true); const [initialData, setInitialData] = useState>({ modelName: '', - dgId: 0, + datasetId: 0, platform: '', baseModels: [], maturity: '', From efd00065ba58e9852d18457ac3dc76fdbf3b7e5c Mon Sep 17 00:00:00 2001 From: erangi-ar <111747955+erangi-ar@users.noreply.github.com> Date: Tue, 1 Jul 2025 17:18:30 +0530 Subject: [PATCH 064/195] Update GUI/src/pages/DataModels/ConfigureDataModel.tsx Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- GUI/src/pages/DataModels/ConfigureDataModel.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GUI/src/pages/DataModels/ConfigureDataModel.tsx b/GUI/src/pages/DataModels/ConfigureDataModel.tsx index 3cd88218..b8e03dfa 100644 --- a/GUI/src/pages/DataModels/ConfigureDataModel.tsx +++ b/GUI/src/pages/DataModels/ConfigureDataModel.tsx @@ -36,7 +36,7 @@ const ConfigureDataModel: FC = ({ const [initialData, setInitialData] = useState>({ modelName: '', datasetId: 0, - platform: '', + deploymentEnvironment: '', baseModels: [], maturity: '', version: '', From a81a8adbb928ed537f04d623804d66b5fd25a18f Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Wed, 2 Jul 2025 06:15:33 +0530 Subject: [PATCH 065/195] fixed merge conflicts --- docker-compose.yml | 155 +++++++++++++++++++++------------------------ 1 file changed, 71 insertions(+), 84 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 6c9b5ee0..e860039b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -144,13 +144,12 @@ services: cron-manager: container_name: cron-manager - image: cron-manager-python:latest + image: cron-manager:latest user: "root" volumes: - ./DSL/CronManager/DSL:/DSL - ./DSL/CronManager/script:/app/scripts - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets - - 
./DSL/DatasetGenerator/temp_chunks:/app/temp_chunks - ./src/s3_dataset_processor:/app/src/s3_dataset_processor - ./DSL/DatasetGenerator/config:/app/config - cron_data:/app/data @@ -178,71 +177,71 @@ services: # restart: always # Dataset Generator services - dataset-gen-ollama: - image: synthesisai/dataset-generator-ollama:latest - container_name: dataset-gen-ollama - ports: - - "11434:11434" - environment: - - NVIDIA_VISIBLE_DEVICES=all - - OLLAMA_USE_GPU=1 - - OLLAMA_HOST=0.0.0.0 - volumes: - - dataset_gen_ollama_models:/root/.ollama - - ./DSL/DatasetGenerator/ollama-entrypoint.sh:/ollama-entrypoint.sh - entrypoint: ["bash", "/ollama-entrypoint.sh"] - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: 1 - capabilities: [gpu] - networks: - - bykstack + # dataset-gen-ollama: + # image: synthesisai/dataset-generator-ollama:latest + # container_name: dataset-gen-ollama + # ports: + # - "11434:11434" + # environment: + # # - NVIDIA_VISIBLE_DEVICES=all + # # - OLLAMA_USE_GPU=1 + # - OLLAMA_HOST=0.0.0.0 + # volumes: + # - dataset_gen_ollama_models:/root/.ollama + # - ./src/dataset-generation/ollama-entrypoint.sh:/ollama-entrypoint.sh + # entrypoint: ["bash", "/ollama-entrypoint.sh"] + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: 1 + # capabilities: [gpu] + # networks: + # - bykstack - dataset-gen-service: - image: synthesisai/dataset-generator:latest - container_name: dataset-gen-service - ports: - - "8000:8000" - environment: - - PROVIDER_API_URL=http://dataset-gen-ollama:11434 - - SERVICE_DEBUG=false - - MLFLOW_TRACKING_URI=http://dataset-gen-mlflow:5000 - volumes: - - ./DSL/DatasetGenerator/config:/app/config - - ./DSL/DatasetGenerator/templates:/app/templates - - ./DSL/DatasetGenerator/user_configs:/app/user_configs - - cron_data:/app/data - - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets - - ./DSL/DatasetGenerator/logs:/app/logs - depends_on: - - dataset-gen-ollama - - dataset-gen-mlflow - networks: - - bykstack + # dataset-gen-service: + # image: synthesisai/dataset-generator:latest + # container_name: dataset-gen-service + # ports: + # - "8000:8000" + # environment: + # - PROVIDER_API_URL=http://dataset-gen-ollama:11434 + # - SERVICE_DEBUG=false + # - MLFLOW_TRACKING_URI=http://dataset-gen-mlflow:5000 + # volumes: + # - ./DSL/DatasetGenerator/config:/app/config + # - ./DSL/DatasetGenerator/templates:/app/templates + # - ./DSL/DatasetGenerator/user_configs:/app/user_configs + # - cron_data:/app/data + # - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets + # - ./DSL/DatasetGenerator/logs:/app/logs + # depends_on: + # - dataset-gen-ollama + # - dataset-gen-mlflow + # networks: + # - bykstack - dataset-gen-mlflow: - image: synthesisai/dataset-generator-mlflow:latest - container_name: dataset-gen-mlflow - ports: - - "5000:5000" - env_file: - - sidecar.env - environment: - - MLFLOW_TRACKING_USERNAME=${MLFLOW_TRACKING_USERNAME} - - MLFLOW_TRACKING_PASSWORD=${MLFLOW_TRACKING_PASSWORD} - - MLFLOW_HOST=${MLFLOW_HOST} - - MLFLOW_PORT=${MLFLOW_PORT} - - MLFLOW_BACKEND_STORE_URI=${MLFLOW_BACKEND_STORE_URI} - - MLFLOW_DEFAULT_ARTIFACT_ROOT=${MLFLOW_DEFAULT_ARTIFACT_ROOT} - - MLFLOW_FLASK_SERVER_SECRET_KEY=${MLFLOW_FLASK_SERVER_SECRET_KEY} - volumes: - - ./DSL/DatasetGenerator/mlflow_data:/mlflow/mlflow_data - - ./DSL/DatasetGenerator/mlflow_artifacts:/mlflow/mlflow_artifacts - networks: - - bykstack + # dataset-gen-mlflow: + # image: synthesisai/dataset-generator-mlflow:latest + # container_name: 
dataset-gen-mlflow + # ports: + # - "5000:5000" + # env_file: + # - sidecar.env + # environment: + # - MLFLOW_TRACKING_USERNAME=${MLFLOW_TRACKING_USERNAME} + # - MLFLOW_TRACKING_PASSWORD=${MLFLOW_TRACKING_PASSWORD} + # - MLFLOW_HOST=${MLFLOW_HOST} + # - MLFLOW_PORT=${MLFLOW_PORT} + # - MLFLOW_BACKEND_STORE_URI=${MLFLOW_BACKEND_STORE_URI} + # - MLFLOW_DEFAULT_ARTIFACT_ROOT=${MLFLOW_DEFAULT_ARTIFACT_ROOT} + # - MLFLOW_FLASK_SERVER_SECRET_KEY=${MLFLOW_FLASK_SERVER_SECRET_KEY} + # volumes: + # - ./DSL/DatasetGenerator/mlflow_data:/mlflow/mlflow_data + # - ./DSL/DatasetGenerator/mlflow_artifacts:/mlflow/mlflow_artifacts + # networks: + # - bykstack minio: @@ -268,7 +267,6 @@ services: container_name: gc-s3-ferry volumes: - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets - - ./DSL/DatasetGenerator/temp_chunks:/app/temp_chunks env_file: - config.env ports: @@ -277,33 +275,22 @@ services: networks: - bykstack - dataset-file-handler: - container_name: dataset-file-handler - build: - context: ./src/dataset_file_hanlder - dockerfile: Dockerfile + s3-dataset-processor: + container_name: s3-dataset-processor + build: ./src/s3_dataset_processor ports: - - "8003:8000" + - "8001:8001" volumes: - - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets - - ./DSL/DatasetGenerator/temp_chunks:/app/temp_chunks - - ./src/dataset_file_hanlder:/app/src - - cron_data:/app/data + - cron_data:/app/data # Same volume as cron-manager environment: - - PORT=8000 - - PYTHONPATH=/app/src - - LOG_LEVEL=INFO + - PORT=8001 networks: - bykstack - depends_on: - - gc-s3-ferry - - resql healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + test: ["CMD", "curl", "-f", "http://localhost:8001/health"] interval: 30s timeout: 10s retries: 3 - restart: unless-stopped gui: container_name: gui From 2509451ef45ccc2618514e07f4d4e08d282014c4 Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Wed, 2 Jul 2025 06:22:30 +0530 Subject: [PATCH 066/195] fixed issues --- DSL/CronManager/DSL/test_pipeline.yml | 5 ----- DSL/CronManager/script/python_test_script.sh | 19 ------------------- 2 files changed, 24 deletions(-) delete mode 100644 DSL/CronManager/DSL/test_pipeline.yml delete mode 100644 DSL/CronManager/script/python_test_script.sh diff --git a/DSL/CronManager/DSL/test_pipeline.yml b/DSL/CronManager/DSL/test_pipeline.yml deleted file mode 100644 index 69b08e98..00000000 --- a/DSL/CronManager/DSL/test_pipeline.yml +++ /dev/null @@ -1,5 +0,0 @@ -test_job: - trigger: off - type: exec - command: "../app/scripts/python_test_script.sh" - allowedEnvs: ['testParam', 'delaySeconds'] \ No newline at end of file diff --git a/DSL/CronManager/script/python_test_script.sh b/DSL/CronManager/script/python_test_script.sh deleted file mode 100644 index 8f3474ee..00000000 --- a/DSL/CronManager/script/python_test_script.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Redirect shell wrapper messages to stderr so only Python JSON goes to stdout -exec 2>&1 - -# Get parameters -TEST_PARAM="${testParam:-default}" -DELAY_SECONDS="${delaySeconds:-0}" - -# Export environment variables for Python script -export testParam="$TEST_PARAM" -export delaySeconds="$DELAY_SECONDS" - -# Execute Python script and capture ONLY its JSON output -# Redirect all shell logging to stderr, keep Python JSON on stdout -python3 /app/src/s3_dataset_processor/python_test.py 2>/dev/null - -# Exit with Python script's exit code -exit $? 
\ No newline at end of file From 51d65e505adf1db23e0bf8147c7f58958ff03f5a Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Wed, 2 Jul 2025 06:30:50 +0530 Subject: [PATCH 067/195] fixed ruff lint issues --- .../chunks_handler_api.py | 48 +- .../download_source_dataset.py | 41 +- .../fetch_chunk_without_filter.py | 99 ++-- src/dataset_file_hanlder/fetch_multi_chunk.py | 184 ++++--- src/dataset_file_hanlder/models/schemas.py | 7 +- .../multiple_chunk_handler.py | 138 +++--- .../services/download_service.py | 11 +- .../services/extraction_service.py | 13 +- .../services/s3_ferry_service.py | 95 ++-- .../single_chunk_handler.py | 50 +- .../dataset_generation_callback_processor.py | 454 +++++++++++------- ...ataset_generation_callback_processor_v1.py | 201 -------- .../download_source_dataset.py | 41 +- .../fetch_chunk_without_filter.py | 99 ++-- src/s3_dataset_processor/fetch_multi_chunk.py | 184 ++++--- src/s3_dataset_processor/models/schemas.py | 5 + .../services/download_service.py | 11 +- .../services/extraction_service.py | 13 +- .../services/s3_ferry_service.py | 95 ++-- 19 files changed, 948 insertions(+), 841 deletions(-) delete mode 100644 src/s3_dataset_processor/dataset_generation_callback_processor_v1.py diff --git a/src/dataset_file_hanlder/chunks_handler_api.py b/src/dataset_file_hanlder/chunks_handler_api.py index 82d3924d..387a20e1 100644 --- a/src/dataset_file_hanlder/chunks_handler_api.py +++ b/src/dataset_file_hanlder/chunks_handler_api.py @@ -7,7 +7,7 @@ ChunkDownloadRequest, ChunkDownloadResponse, MultiChunkDownloadRequest, - MultiChunkDownloadResponse + MultiChunkDownloadResponse, ) from single_chunk_handler import ChunkService from multiple_chunk_handler import MultiChunkService @@ -28,23 +28,27 @@ async def download_chunk(request: ChunkDownloadRequest): """ Download a single chunk from S3. - + Args: request: Chunk download request containing dataset_id and page_num - + Returns: Chunk data or error information """ try: - logger.info(f"Chunk download request - Dataset: {request.datasetId}, Page: {request.pageNum}") - - result = chunk_service.download_chunk_from_s3(request.datasetId, request.pageNum) - + logger.info( + f"Chunk download request - Dataset: {request.datasetId}, Page: {request.pageNum}" + ) + + result = chunk_service.download_chunk_from_s3( + request.datasetId, request.pageNum + ) + if not result["success"]: raise HTTPException(status_code=400, detail=result) - + return ChunkDownloadResponse(**result) - + except HTTPException: raise except Exception as e: @@ -56,26 +60,30 @@ async def download_chunk(request: ChunkDownloadRequest): async def download_multiple_chunks(request: MultiChunkDownloadRequest): """ Download and aggregate multiple chunks from S3. 
- + Args: request: Multi-chunk download request containing dataset_id and chunk_ids - + Returns: Aggregated chunk data or error information """ try: - logger.info(f"Multi-chunk download request - Dataset: {request.datasetId}, Chunks: {request.chunkIds}") - + logger.info( + f"Multi-chunk download request - Dataset: {request.datasetId}, Chunks: {request.chunkIds}" + ) + if not request.chunkIds: raise HTTPException(status_code=400, detail="No chunk IDs provided") - - result = multi_chunk_service.download_multiple_chunks(request.datasetId, request.chunkIds) - + + result = multi_chunk_service.download_multiple_chunks( + request.datasetId, request.chunkIds + ) + if not result["success"]: raise HTTPException(status_code=400, detail=result) - + return MultiChunkDownloadResponse(**result) - + except HTTPException: raise except Exception as e: @@ -88,6 +96,8 @@ async def chunk_health_check(): """Health check endpoint for chunk services.""" return {"status": "healthy", "service": "chunk-download"} + if __name__ == "__main__": import uvicorn - uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file + + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/src/dataset_file_hanlder/download_source_dataset.py b/src/dataset_file_hanlder/download_source_dataset.py index eeab901c..bc061457 100644 --- a/src/dataset_file_hanlder/download_source_dataset.py +++ b/src/dataset_file_hanlder/download_source_dataset.py @@ -4,6 +4,7 @@ Direct Python script for downloading datasets from S3 signed URLs. Replaces the FastAPI /download-datasets endpoint for CronManager execution. """ + import sys import json import argparse @@ -14,15 +15,15 @@ # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stdout)] + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger(__name__) # Add the s3_dataset_processor to Python path to import modules FIRST # This path corresponds to the volume mount in docker-compose.yml -script_dir = Path('/app/src/s3_dataset_processor') +script_dir = Path("/app/src/s3_dataset_processor") sys.path.insert(0, str(script_dir)) # Now import the services AFTER adding to path @@ -31,6 +32,7 @@ from services.download_service import DownloadService from services.extraction_service import ExtractionService from handlers.response_handler import ResponseHandler + logger.info("✅ Successfully imported all required modules") except ImportError as e: logger.error(f"❌ Failed to import required modules: {e}") @@ -40,15 +42,25 @@ logger.error(f"Contents of script directory: {list(script_dir.iterdir())}") sys.exit(1) + def main(): """Main function to handle dataset download process.""" - parser = argparse.ArgumentParser(description='Download datasets from S3 signed URLs') - parser.add_argument('--encoded-data', required=True, help='Base64 encoded signed URLs data') - parser.add_argument('--extract-files', action='store_true', default=True, help='Extract downloaded files') - parser.add_argument('--output-json', help='Output file path for results JSON') - + parser = argparse.ArgumentParser( + description="Download datasets from S3 signed URLs" + ) + parser.add_argument( + "--encoded-data", required=True, help="Base64 encoded signed URLs data" + ) + parser.add_argument( + "--extract-files", + action="store_true", + default=True, + help="Extract downloaded files", + ) + 
parser.add_argument("--output-json", help="Output file path for results JSON") + args = parser.parse_args() - + try: if not args.encoded_data or not isinstance(args.encoded_data, str): logger.error("'encoded_data' must be a non-empty string") @@ -90,14 +102,16 @@ def main(): # Output results if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(response.dict(), f, indent=2) logger.info(f"Results written to {args.output_json}") else: print(json.dumps(response.dict(), indent=2)) # Log summary - logger.info(f"Download completed: {successful_downloads} successful, {failed_downloads} failed") + logger.info( + f"Download completed: {successful_downloads} successful, {failed_downloads} failed" + ) logger.info(f"Extracted folders: {len(extracted_folders)}") # Exit with appropriate code @@ -112,5 +126,6 @@ def main(): traceback.print_exc() sys.exit(1) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/dataset_file_hanlder/fetch_chunk_without_filter.py b/src/dataset_file_hanlder/fetch_chunk_without_filter.py index 2aa5e54d..44814d4a 100644 --- a/src/dataset_file_hanlder/fetch_chunk_without_filter.py +++ b/src/dataset_file_hanlder/fetch_chunk_without_filter.py @@ -3,6 +3,7 @@ Python script to download a specific chunk from S3 bucket and return as JSON. Used by CronManager endpoint to fetch individual chunks. """ + import sys import json import argparse @@ -15,46 +16,50 @@ # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stderr)] # Log to stderr to keep stdout clean + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stderr)], # Log to stderr to keep stdout clean ) logger = logging.getLogger(__name__) # Add the s3_dataset_processor to Python path to import modules -script_dir = Path('/app/src/s3_dataset_processor') +script_dir = Path("/app/src/s3_dataset_processor") sys.path.insert(0, str(script_dir)) + def log(message): """Log to stderr to keep stdout clean for JSON output""" logger.info(f"🔍 [CHUNK DOWNLOAD] {message}") + try: from services.s3_ferry_service import S3Ferry + s3_ferry_service = S3Ferry() log("Successfully imported S3FerryService") except ImportError as e: log(f"Failed to import S3FerryService: {e}") sys.exit(1) + def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: """ Download a specific chunk from S3 bucket. 
- + Args: dataset_id: Dataset ID page_num: Page number (chunk number) - + Returns: Dictionary containing chunk data or error information """ try: log(f"Starting chunk download - Dataset ID: {dataset_id}, Page: {page_num}") - + # Create temporary directory for download temp_dir = tempfile.mkdtemp(prefix="chunk_download_") log(f"Created temporary directory: {temp_dir}") - + # Define S3 source path and local destination chunk_filename = f"{page_num}.json" s3_source_path = f"{dataset_id}/{chunk_filename}" @@ -64,48 +69,48 @@ def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: temp_chunks_dir = "temp_chunks" os.makedirs(temp_chunks_dir, exist_ok=True) log(f"Created/verified temp directory: {temp_chunks_dir}") - + log(f"S3 source path: {s3_source_path}") log(f"Local destination: {local_dest_path}") - + # Download chunk from S3 using S3Ferry service response = s3_ferry_service.transfer_file( destination_file_path=local_dest_path, destination_storage_type="FS", source_file_path=s3_source_path, - source_storage_type="S3" + source_storage_type="S3", ) - + log(f"S3Ferry response status: {response.status_code}") log(f"S3Ferry response body: {response.text}") - + if response.status_code in [200, 201]: # Read the downloaded chunk file local_file_path = f"/app/{local_dest_path}" - + if os.path.exists(local_file_path): log(f"Successfully downloaded chunk to: {local_file_path}") - + # Read and parse the chunk data - with open(local_file_path, 'r', encoding='utf-8') as f: + with open(local_file_path, "r", encoding="utf-8") as f: chunk_data = json.load(f) - + # Clean up the downloaded file os.remove(local_file_path) log(f"Cleaned up downloaded file: {local_file_path}") - + # Remove empty directory if it exists try: os.rmdir(os.path.dirname(local_file_path)) except OSError: pass # Directory not empty or doesn't exist - + return { "success": True, "dataset_id": dataset_id, "page_num": page_num, "chunk_data": chunk_data, - "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}" + "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}", } else: return { @@ -113,7 +118,7 @@ def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: "dataset_id": dataset_id, "page_num": page_num, "error": f"Downloaded file not found at: {local_file_path}", - "message": "File download completed but file not accessible" + "message": "File download completed but file not accessible", } else: return { @@ -122,9 +127,9 @@ def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: "page_num": page_num, "error": f"S3 download failed: HTTP {response.status_code}", "response_body": response.text, - "message": f"Failed to download chunk {page_num} from S3" + "message": f"Failed to download chunk {page_num} from S3", } - + except Exception as e: log(f"Error during chunk download: {str(e)}") traceback.print_exc() @@ -133,57 +138,63 @@ def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: "dataset_id": dataset_id, "page_num": page_num, "error": str(e), - "message": "Internal error during chunk download" + "message": "Internal error during chunk download", } + def main(): """Main function to handle chunk download process.""" - parser = argparse.ArgumentParser(description='Download a specific chunk from S3') - parser.add_argument('--dataset-id', required=True, help='Dataset ID') - parser.add_argument('--page-num', required=True, type=int, help='Page number (chunk number)') - parser.add_argument('--output-json', help='Output file path for results 
JSON') - + parser = argparse.ArgumentParser(description="Download a specific chunk from S3") + parser.add_argument("--dataset-id", required=True, help="Dataset ID") + parser.add_argument( + "--page-num", required=True, type=int, help="Page number (chunk number)" + ) + parser.add_argument("--output-json", help="Output file path for results JSON") + args = parser.parse_args() - + try: - log(f"Processing chunk download request - Dataset: {args.dataset_id}, Page: {args.page_num}") - + log( + f"Processing chunk download request - Dataset: {args.dataset_id}, Page: {args.page_num}" + ) + # Download the chunk result = download_chunk_from_s3(args.dataset_id, args.page_num) - + # Output results if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(result, f, indent=2) log(f"Results written to {args.output_json}") else: # Output ONLY the JSON to stdout (this goes to CronManager) print(json.dumps(result)) - + log(f"Chunk download completed - Success: {result['success']}") - + # Exit with appropriate code - sys.exit(0 if result['success'] else 1) - + sys.exit(0 if result["success"] else 1) + except Exception as e: log(f"Internal error: {str(e)}") traceback.print_exc() - + error_result = { "success": False, "dataset_id": args.dataset_id, "page_num": args.page_num, "error": str(e), - "message": "Script execution failed" + "message": "Script execution failed", } - + if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(error_result, f, indent=2) else: print(json.dumps(error_result)) - + sys.exit(1) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/dataset_file_hanlder/fetch_multi_chunk.py b/src/dataset_file_hanlder/fetch_multi_chunk.py index 18cf8228..8ad5f24a 100644 --- a/src/dataset_file_hanlder/fetch_multi_chunk.py +++ b/src/dataset_file_hanlder/fetch_multi_chunk.py @@ -3,12 +3,12 @@ Python script to download multiple chunks from S3 bucket, aggregate them, and return as JSON. Used by CronManager endpoint to fetch and combine multiple chunks. """ + import sys import json import argparse import logging import os -import tempfile from pathlib import Path import traceback from typing import List, Dict, Any @@ -16,91 +16,95 @@ # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stderr)] # Log to stderr to keep stdout clean + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stderr)], # Log to stderr to keep stdout clean ) logger = logging.getLogger(__name__) # Add the s3_dataset_processor to Python path to import modules -script_dir = Path('/app/src/s3_dataset_processor') +script_dir = Path("/app/src/s3_dataset_processor") sys.path.insert(0, str(script_dir)) + def log(message): """Log to stderr to keep stdout clean for JSON output""" logger.info(f"📦 [MULTI CHUNK] {message}") + try: from services.s3_ferry_service import S3Ferry + s3_ferry_service = S3Ferry() log("Successfully imported S3FerryService") except ImportError as e: log(f"Failed to import S3FerryService: {e}") sys.exit(1) + def download_single_chunk_from_s3(dataset_id: str, chunk_id: int) -> Dict[str, Any]: """ Download a single chunk from S3 bucket. 
- + Args: dataset_id: Dataset ID chunk_id: Chunk ID/number - + Returns: Dictionary containing chunk data or error information """ try: log(f"Downloading chunk {chunk_id} from dataset {dataset_id}") - + # Define S3 source path and local destination chunk_filename = f"{chunk_id}.json" s3_source_path = f"{dataset_id}/{chunk_filename}" local_dest_path = f"temp_chunks/{chunk_filename}" - + # Create the temp_chunks directory if it doesn't exist temp_chunks_dir = "/app/temp_chunks" os.makedirs(temp_chunks_dir, exist_ok=True) - + log(f"S3 source path: {s3_source_path}") log(f"Local destination: {local_dest_path}") - + # Download chunk from S3 using S3Ferry service response = s3_ferry_service.transfer_file( destination_file_path=local_dest_path, destination_storage_type="FS", source_file_path=s3_source_path, - source_storage_type="S3" + source_storage_type="S3", ) - + log(f"S3Ferry response status for chunk {chunk_id}: {response.status_code}") - + if response.status_code in [200, 201]: # Read the downloaded chunk file local_file_path = f"/app/{local_dest_path}" - + if os.path.exists(local_file_path): log(f"Successfully downloaded chunk {chunk_id} to: {local_file_path}") - + # Read and parse the chunk data - with open(local_file_path, 'r', encoding='utf-8') as f: + with open(local_file_path, "r", encoding="utf-8") as f: chunk_data = json.load(f) - + # Clean up the downloaded file os.remove(local_file_path) log(f"Cleaned up downloaded file: {local_file_path}") - + return { "success": True, "chunk_id": chunk_id, "chunk_data": chunk_data, - "message": f"Successfully downloaded chunk {chunk_id}" + "message": f"Successfully downloaded chunk {chunk_id}", } else: return { "success": False, "chunk_id": chunk_id, "error": f"Downloaded file not found at: {local_file_path}", - "message": f"Chunk {chunk_id} download completed but file not accessible" + "message": f"Chunk {chunk_id} download completed but file not accessible", } else: return { @@ -108,9 +112,9 @@ def download_single_chunk_from_s3(dataset_id: str, chunk_id: int) -> Dict[str, A "chunk_id": chunk_id, "error": f"S3 download failed: HTTP {response.status_code}", "response_body": response.text, - "message": f"Failed to download chunk {chunk_id} from S3" + "message": f"Failed to download chunk {chunk_id} from S3", } - + except Exception as e: log(f"Error downloading chunk {chunk_id}: {str(e)}") traceback.print_exc() @@ -118,56 +122,67 @@ def download_single_chunk_from_s3(dataset_id: str, chunk_id: int) -> Dict[str, A "success": False, "chunk_id": chunk_id, "error": str(e), - "message": f"Internal error during chunk {chunk_id} download" + "message": f"Internal error during chunk {chunk_id} download", } + def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, Any]: """ Download multiple chunks from S3 and aggregate them. 
- + Args: dataset_id: Dataset ID chunk_ids: List of chunk IDs to download - + Returns: Dictionary containing aggregated chunk data or error information """ try: - log(f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}") - + log( + f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}" + ) + download_results = [] successful_chunks = [] failed_chunks = [] aggregated_data = [] total_items = 0 - + # Download each chunk for chunk_id in chunk_ids: result = download_single_chunk_from_s3(dataset_id, chunk_id) download_results.append(result) - + if result["success"]: successful_chunks.append(chunk_id) chunk_data = result["chunk_data"] - + # Extract data array from chunk chunk_items = chunk_data.get("data", []) aggregated_data.extend(chunk_items) total_items += len(chunk_items) - - log(f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation") + + log( + f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation" + ) else: failed_chunks.append(chunk_id) - log(f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}") - + log( + f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}" + ) + # Prepare chunk info from the first successful chunk (if any) chunk_info = {} if successful_chunks and download_results: first_successful = next((r for r in download_results if r["success"]), None) if first_successful: - original_chunk_info = first_successful["chunk_data"].get("chunk_info", {}) + original_chunk_info = first_successful["chunk_data"].get( + "chunk_info", {} + ) chunk_info = { - "original_dataset": original_chunk_info.get("original_dataset", dataset_id), + "original_dataset": original_chunk_info.get( + "original_dataset", dataset_id + ), "requested_chunks": chunk_ids, "successful_chunks": successful_chunks, "failed_chunks": failed_chunks, @@ -175,9 +190,11 @@ def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, "successful_downloads": len(successful_chunks), "failed_downloads": len(failed_chunks), "total_aggregated_items": total_items, - "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" if successful_chunks else "none" + "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" + if successful_chunks + else "none", } - + # Prepare the final aggregated payload if successful_chunks: aggregated_payload = { @@ -191,10 +208,10 @@ def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, "failed_downloads": len(failed_chunks), "successful_chunk_ids": successful_chunks, "failed_chunk_ids": failed_chunks, - "total_items_aggregated": total_items + "total_items_aggregated": total_items, }, "download_details": download_results, - "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested chunks" + "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested chunks", } else: aggregated_payload = { @@ -208,18 +225,20 @@ def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, "failed_downloads": len(failed_chunks), "successful_chunk_ids": [], "failed_chunk_ids": failed_chunks, - "total_items_aggregated": 0 + "total_items_aggregated": 0, }, "download_details": download_results, "error": "All chunk downloads failed", - "message": f"Failed to download any of the {len(chunk_ids)} requested chunks" + "message": f"Failed to download any of the {len(chunk_ids)} requested chunks", } - - log(f"Multi-chunk 
aggregation completed - Success: {aggregated_payload['success']}") + + log( + f"Multi-chunk aggregation completed - Success: {aggregated_payload['success']}" + ) log(f"Total items aggregated: {total_items}") - + return aggregated_payload - + except Exception as e: log(f"Error during multi-chunk aggregation: {str(e)}") traceback.print_exc() @@ -234,85 +253,100 @@ def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, "failed_downloads": len(chunk_ids), "successful_chunk_ids": [], "failed_chunk_ids": chunk_ids, - "total_items_aggregated": 0 + "total_items_aggregated": 0, }, "error": str(e), - "message": "Internal error during multi-chunk aggregation" + "message": "Internal error during multi-chunk aggregation", } + def parse_chunk_ids(chunk_ids_str: str) -> List[int]: """ Parse chunk IDs from string format "1 2 3" to list [1, 2, 3]. - + Args: chunk_ids_str: String containing space-separated chunk IDs - + Returns: List of integer chunk IDs """ try: # Split by spaces and convert to integers - chunk_ids = [int(chunk_id.strip()) for chunk_id in chunk_ids_str.split() if chunk_id.strip()] + chunk_ids = [ + int(chunk_id.strip()) + for chunk_id in chunk_ids_str.split() + if chunk_id.strip() + ] log(f"Parsed chunk IDs: {chunk_ids}") return chunk_ids except ValueError as e: log(f"Error parsing chunk IDs '{chunk_ids_str}': {str(e)}") - raise ValueError(f"Invalid chunk IDs format. Expected space-separated integers, got: '{chunk_ids_str}'") + raise ValueError( + f"Invalid chunk IDs format. Expected space-separated integers, got: '{chunk_ids_str}'" + ) + def main(): """Main function to handle multi-chunk download and aggregation process.""" - parser = argparse.ArgumentParser(description='Download and aggregate multiple chunks from S3') - parser.add_argument('--dataset-id', required=True, help='Dataset ID') - parser.add_argument('--chunk-ids', required=True, help='Space-separated chunk IDs (e.g., "1 2 3")') - parser.add_argument('--output-json', help='Output file path for results JSON') - + parser = argparse.ArgumentParser( + description="Download and aggregate multiple chunks from S3" + ) + parser.add_argument("--dataset-id", required=True, help="Dataset ID") + parser.add_argument( + "--chunk-ids", required=True, help='Space-separated chunk IDs (e.g., "1 2 3")' + ) + parser.add_argument("--output-json", help="Output file path for results JSON") + args = parser.parse_args() - + try: - log(f"Processing multi-chunk request - Dataset: {args.dataset_id}, Chunk IDs: {args.chunk_ids}") - + log( + f"Processing multi-chunk request - Dataset: {args.dataset_id}, Chunk IDs: {args.chunk_ids}" + ) + # Parse chunk IDs chunk_ids = parse_chunk_ids(args.chunk_ids) - + if not chunk_ids: raise ValueError("No valid chunk IDs provided") - + # Download and aggregate chunks result = download_multiple_chunks(args.dataset_id, chunk_ids) - + # Output results if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(result, f, indent=2) log(f"Results written to {args.output_json}") else: # Output ONLY the JSON to stdout (this goes to CronManager) print(json.dumps(result)) - + log(f"Multi-chunk processing completed - Success: {result['success']}") - + # Exit with appropriate code - sys.exit(0 if result['success'] else 1) - + sys.exit(0 if result["success"] else 1) + except Exception as e: log(f"Internal error: {str(e)}") traceback.print_exc() - + error_result = { "success": False, "dataset_id": args.dataset_id, "chunk_ids": args.chunk_ids, "error": 
str(e), - "message": "Script execution failed" + "message": "Script execution failed", } - + if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(error_result, f, indent=2) else: print(json.dumps(error_result)) - + sys.exit(1) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/dataset_file_hanlder/models/schemas.py b/src/dataset_file_hanlder/models/schemas.py index e91f35b8..30b7c272 100644 --- a/src/dataset_file_hanlder/models/schemas.py +++ b/src/dataset_file_hanlder/models/schemas.py @@ -35,20 +35,24 @@ class DownloadResponse(BaseModel): extracted_folders: List[Dict[str, str]] total_extracted_folders: int + class ChunkDownloadRequest(BaseModel): """Request model for downloading a single chunk.""" + datasetId: str pageNum: int class MultiChunkDownloadRequest(BaseModel): """Request model for downloading multiple chunks.""" + datasetId: str chunkIds: List[int] class ChunkDownloadResponse(BaseModel): """Response model for single chunk download.""" + success: bool dataset_id: str page_num: Optional[int] = None @@ -59,6 +63,7 @@ class ChunkDownloadResponse(BaseModel): class MultiChunkDownloadResponse(BaseModel): """Response model for multi-chunk download and aggregation.""" + success: bool dataset_id: str chunk_info: Optional[Dict[str, Any]] = None @@ -66,4 +71,4 @@ class MultiChunkDownloadResponse(BaseModel): download_summary: Dict[str, Any] download_details: Optional[List[Dict[str, Any]]] = None error: Optional[str] = None - message: str \ No newline at end of file + message: str diff --git a/src/dataset_file_hanlder/multiple_chunk_handler.py b/src/dataset_file_hanlder/multiple_chunk_handler.py index 17703ff7..9fa8aad8 100644 --- a/src/dataset_file_hanlder/multiple_chunk_handler.py +++ b/src/dataset_file_hanlder/multiple_chunk_handler.py @@ -11,71 +11,77 @@ class MultiChunkService: """Service class for handling multiple chunk operations.""" - + def __init__(self): """Initialize the multi-chunk service.""" self.s3_ferry_service = S3Ferry() logger.info("MultiChunkService initialized") - - def download_single_chunk_from_s3(self, dataset_id: str, chunk_id: int) -> Dict[str, Any]: + + def download_single_chunk_from_s3( + self, dataset_id: str, chunk_id: int + ) -> Dict[str, Any]: """ Download a single chunk from S3 bucket. 
- + Args: dataset_id: Dataset ID chunk_id: Chunk ID to download - + Returns: Dictionary containing download result """ try: logger.info(f"Downloading chunk {chunk_id} for dataset {dataset_id}") - + # Define S3 source path and local destination chunk_filename = f"{chunk_id}.json" s3_source_path = f"{dataset_id}/{chunk_filename}" local_dest_path = f"temp_chunks/{chunk_filename}" - + # Create the temp_chunks directory if it doesn't exist os.makedirs("temp_chunks", exist_ok=True) - + # Download chunk from S3 using S3Ferry service response = self.s3_ferry_service.transfer_file( destination_file_path=local_dest_path, destination_storage_type="FS", source_file_path=s3_source_path, - source_storage_type="S3" + source_storage_type="S3", + ) + + logger.info( + f"S3Ferry response status for chunk {chunk_id}: {response.status_code}" ) - - logger.info(f"S3Ferry response status for chunk {chunk_id}: {response.status_code}") - + if response.status_code in [200, 201]: # Read the downloaded chunk file local_file_path = f"/app/{local_dest_path}" - + if os.path.exists(local_file_path): - logger.info(f"Successfully downloaded chunk {chunk_id} to: {local_file_path}") - + logger.info( + f"Successfully downloaded chunk {chunk_id} to: {local_file_path}" + ) + # Read and parse the chunk data - with open(local_file_path, 'r', encoding='utf-8') as f: + with open(local_file_path, "r", encoding="utf-8") as f: chunk_data = json.load(f) - + # Clean up the downloaded file os.remove(local_file_path) logger.info(f"Cleaned up downloaded file: {local_file_path}") - + return { "success": True, "chunk_id": chunk_id, "chunk_data": chunk_data, - "message": f"Successfully downloaded chunk {chunk_id}" + "message": f"Successfully downloaded chunk {chunk_id}", } else: return { "success": False, "chunk_id": chunk_id, "error": f"Downloaded file not found at: {local_file_path}", - "message": f"Chunk {chunk_id} download completed but file not accessible" + "message": f"Chunk {chunk_id} download completed but file not accessible", } else: return { @@ -83,9 +89,9 @@ def download_single_chunk_from_s3(self, dataset_id: str, chunk_id: int) -> Dict[ "chunk_id": chunk_id, "error": f"S3 download failed: HTTP {response.status_code}", "response_body": response.text, - "message": f"Failed to download chunk {chunk_id} from S3" + "message": f"Failed to download chunk {chunk_id} from S3", } - + except Exception as e: logger.error(f"Error downloading chunk {chunk_id}: {str(e)}") traceback.print_exc() @@ -93,56 +99,70 @@ def download_single_chunk_from_s3(self, dataset_id: str, chunk_id: int) -> Dict[ "success": False, "chunk_id": chunk_id, "error": str(e), - "message": f"Internal error during chunk {chunk_id} download" + "message": f"Internal error during chunk {chunk_id} download", } - - def download_multiple_chunks(self, dataset_id: str, chunk_ids: List[int]) -> Dict[str, Any]: + + def download_multiple_chunks( + self, dataset_id: str, chunk_ids: List[int] + ) -> Dict[str, Any]: """ Download multiple chunks from S3 and aggregate them. 
- + Args: dataset_id: Dataset ID chunk_ids: List of chunk IDs to download - + Returns: Dictionary containing aggregated chunk data or error information """ try: - logger.info(f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}") - + logger.info( + f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}" + ) + download_results = [] successful_chunks = [] failed_chunks = [] aggregated_data = [] total_items = 0 - + # Download each chunk for chunk_id in chunk_ids: result = self.download_single_chunk_from_s3(dataset_id, chunk_id) download_results.append(result) - + if result["success"]: successful_chunks.append(chunk_id) chunk_data = result["chunk_data"] - + # Extract data array from chunk chunk_items = chunk_data.get("data", []) aggregated_data.extend(chunk_items) total_items += len(chunk_items) - - logger.info(f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation") + + logger.info( + f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation" + ) else: failed_chunks.append(chunk_id) - logger.error(f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}") - + logger.error( + f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}" + ) + # Prepare chunk info from the first successful chunk (if any) chunk_info = {} if successful_chunks and download_results: - first_successful = next((r for r in download_results if r["success"]), None) + first_successful = next( + (r for r in download_results if r["success"]), None + ) if first_successful: - original_chunk_info = first_successful["chunk_data"].get("chunk_info", {}) + original_chunk_info = first_successful["chunk_data"].get( + "chunk_info", {} + ) chunk_info = { - "original_dataset": original_chunk_info.get("original_dataset", dataset_id), + "original_dataset": original_chunk_info.get( + "original_dataset", dataset_id + ), "requested_chunks": chunk_ids, "successful_chunks": successful_chunks, "failed_chunks": failed_chunks, @@ -150,9 +170,11 @@ def download_multiple_chunks(self, dataset_id: str, chunk_ids: List[int]) -> Dic "successful_downloads": len(successful_chunks), "failed_downloads": len(failed_chunks), "total_aggregated_items": total_items, - "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" if successful_chunks else "none" + "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" + if successful_chunks + else "none", } - + # Prepare the final aggregated payload if successful_chunks: aggregated_payload = { @@ -166,10 +188,10 @@ def download_multiple_chunks(self, dataset_id: str, chunk_ids: List[int]) -> Dic "failed_downloads": len(failed_chunks), "successful_chunk_ids": successful_chunks, "failed_chunk_ids": failed_chunks, - "total_items_aggregated": total_items + "total_items_aggregated": total_items, }, "download_details": download_results, - "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested chunks" + "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested chunks", } else: aggregated_payload = { @@ -183,18 +205,20 @@ def download_multiple_chunks(self, dataset_id: str, chunk_ids: List[int]) -> Dic "failed_downloads": len(failed_chunks), "successful_chunk_ids": [], "failed_chunk_ids": failed_chunks, - "total_items_aggregated": 0 + "total_items_aggregated": 0, }, "download_details": download_results, "error": "All chunk downloads failed", - "message": f"Failed to download any of the 
{len(chunk_ids)} requested chunks" + "message": f"Failed to download any of the {len(chunk_ids)} requested chunks", } - - logger.info(f"Multi-chunk aggregation completed - Success: {aggregated_payload['success']}") + + logger.info( + f"Multi-chunk aggregation completed - Success: {aggregated_payload['success']}" + ) logger.info(f"Total items aggregated: {total_items}") - + return aggregated_payload - + except Exception as e: logger.error(f"Error during multi-chunk aggregation: {str(e)}") traceback.print_exc() @@ -209,26 +233,28 @@ def download_multiple_chunks(self, dataset_id: str, chunk_ids: List[int]) -> Dic "failed_downloads": len(chunk_ids), "successful_chunk_ids": [], "failed_chunk_ids": chunk_ids, - "total_items_aggregated": 0 + "total_items_aggregated": 0, }, "error": str(e), - "message": "Internal error during multi-chunk aggregation" + "message": "Internal error during multi-chunk aggregation", } - + def parse_chunk_ids(self, chunk_ids_str: str) -> List[int]: """ Parse chunk IDs from string format. - + Args: chunk_ids_str: Space-separated chunk IDs (e.g., "1 2 3") - + Returns: List of chunk IDs as integers """ try: - chunk_ids = [int(x.strip()) for x in chunk_ids_str.split() if x.strip().isdigit()] + chunk_ids = [ + int(x.strip()) for x in chunk_ids_str.split() if x.strip().isdigit() + ] logger.info(f"Parsed chunk IDs: {chunk_ids}") return chunk_ids except Exception as e: logger.error(f"Error parsing chunk IDs '{chunk_ids_str}': {str(e)}") - return [] \ No newline at end of file + return [] diff --git a/src/dataset_file_hanlder/services/download_service.py b/src/dataset_file_hanlder/services/download_service.py index 1ca27964..146eaeac 100644 --- a/src/dataset_file_hanlder/services/download_service.py +++ b/src/dataset_file_hanlder/services/download_service.py @@ -8,12 +8,13 @@ from models.schemas import DownloadedFile import sys import logging + # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stdout)] + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger(__name__) @@ -88,7 +89,9 @@ def process_downloads(self, decoded_data: List[Dict[str, Any]]) -> tuple: # Download file to data directory local_file_path = os.path.join(self.data_dir, original_filename) - logger.info(f"Downloading {original_filename} for agency {agency_id} with name {agency_name}") + logger.info( + f"Downloading {original_filename} for agency {agency_id} with name {agency_name}" + ) if self.download_file(signed_url, local_file_path): file_size = os.path.getsize(local_file_path) diff --git a/src/dataset_file_hanlder/services/extraction_service.py b/src/dataset_file_hanlder/services/extraction_service.py index a81318de..8b937234 100644 --- a/src/dataset_file_hanlder/services/extraction_service.py +++ b/src/dataset_file_hanlder/services/extraction_service.py @@ -9,12 +9,13 @@ from models.schemas import DownloadedFile import sys import logging + # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stdout)] + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger(__name__) @@ -91,7 +92,11 @@ def process_extractions( # Add to extracted folders list 
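+                # Illustrative shape of one appended record (example values only, not
+                # from a real run):
+                #   {"agency_id": "12", "agency_name": "Example Agency", "folder_path": "data/12_example"}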
extracted_folders.append( - {"agency_id": downloaded_file.agency_id, "agency_name": downloaded_file.agency_name, "folder_path": agency_dir} + { + "agency_id": downloaded_file.agency_id, + "agency_name": downloaded_file.agency_name, + "folder_path": agency_dir, + } ) # Remove the ZIP file after successful extraction diff --git a/src/dataset_file_hanlder/services/s3_ferry_service.py b/src/dataset_file_hanlder/services/s3_ferry_service.py index a7e3d2fc..0c0cfafe 100644 --- a/src/dataset_file_hanlder/services/s3_ferry_service.py +++ b/src/dataset_file_hanlder/services/s3_ferry_service.py @@ -3,18 +3,19 @@ import requests import logging import traceback -from typing import Dict, Any +from typing import Dict # Configure logging logger = logging.getLogger(__name__) + class S3Ferry: """Service class for handling S3Ferry file transfer operations.""" - + def __init__(self, base_url: str = "http://gc-s3-ferry:3000"): """ Initialize the S3Ferry service. - + Args: base_url: Base URL for the S3Ferry service """ @@ -22,64 +23,78 @@ def __init__(self, base_url: str = "http://gc-s3-ferry:3000"): self.url = f"{base_url}/v1/files/copy" logger.info(f"S3Ferry service initialized with URL: {self.url}") - def transfer_file(self, destination_file_path: str, destination_storage_type: str, - source_file_path: str, source_storage_type: str) -> requests.Response: + def transfer_file( + self, + destination_file_path: str, + destination_storage_type: str, + source_file_path: str, + source_storage_type: str, + ) -> requests.Response: """ Transfer a file using S3Ferry service. - + Args: destination_file_path: Path where the file should be stored in destination destination_storage_type: Type of destination storage (e.g., 's3', 'local') source_file_path: Path of the source file source_storage_type: Type of source storage (e.g., 'local', 's3') - + Returns: Response object from the S3Ferry service """ try: payload = self.get_s3_ferry_payload( - destination_file_path, - destination_storage_type, - source_file_path, - source_storage_type + destination_file_path, + destination_storage_type, + source_file_path, + source_storage_type, + ) + + logger.info( + f"[S3_FERRY] Transferring file: {source_file_path} -> {destination_file_path}" ) - - logger.info(f"[S3_FERRY] Transferring file: {source_file_path} -> {destination_file_path}") logger.debug(f"[S3_FERRY] Payload: {payload}") - + response = requests.post( - self.url, + self.url, json=payload, headers={"Content-Type": "application/json"}, - timeout=60 + timeout=60, ) - + logger.info(f"[S3_FERRY] Transfer response status: {response.status_code}") - + # Accept both 200 (OK) and 201 (Created) as success if response.status_code not in [200, 201]: logger.error(f"[S3_FERRY] Transfer failed: {response.text}") else: - logger.info(f"[S3_FERRY] ✅ Transfer successful (HTTP {response.status_code})") - + logger.info( + f"[S3_FERRY] ✅ Transfer successful (HTTP {response.status_code})" + ) + return response - + except Exception as e: logger.error(f"[S3_FERRY] Error during file transfer: {str(e)}") traceback.print_exc() raise - def get_s3_ferry_payload(self, destination_file_path: str, destination_storage_type: str, - source_file_path: str, source_storage_type: str) -> Dict[str, str]: + def get_s3_ferry_payload( + self, + destination_file_path: str, + destination_storage_type: str, + source_file_path: str, + source_storage_type: str, + ) -> Dict[str, str]: """ Generate S3Ferry payload for file transfer. 
- + Args: destination_file_path: Path where the file should be stored in destination destination_storage_type: Type of destination storage source_file_path: Path of the source file source_storage_type: Type of source storage - + Returns: Dictionary containing the S3Ferry payload """ @@ -87,19 +102,21 @@ def get_s3_ferry_payload(self, destination_file_path: str, destination_storage_t "destinationFilePath": destination_file_path, "destinationStorageType": destination_storage_type, "sourceFilePath": source_file_path, - "sourceStorageType": source_storage_type + "sourceStorageType": source_storage_type, } - + return payload - def upload_to_s3(self, local_file_path: str, s3_destination_path: str) -> requests.Response: + def upload_to_s3( + self, local_file_path: str, s3_destination_path: str + ) -> requests.Response: """ Convenience method to upload a local file to S3. - + Args: local_file_path: Path to the local file s3_destination_path: S3 destination path (e.g., 'bucket/folder/file.json') - + Returns: Response object from the S3Ferry service """ @@ -107,17 +124,19 @@ def upload_to_s3(self, local_file_path: str, s3_destination_path: str) -> reques destination_file_path=s3_destination_path, destination_storage_type="S3", source_file_path=local_file_path, - source_storage_type="FS" + source_storage_type="FS", ) - def download_from_s3(self, s3_source_path: str, local_destination_path: str) -> requests.Response: + def download_from_s3( + self, s3_source_path: str, local_destination_path: str + ) -> requests.Response: """ Convenience method to download a file from S3 to local storage. - + Args: s3_source_path: S3 source path (e.g., 'bucket/folder/file.json') local_destination_path: Local destination path - + Returns: Response object from the S3Ferry service """ @@ -125,17 +144,17 @@ def download_from_s3(self, s3_source_path: str, local_destination_path: str) -> destination_file_path=local_destination_path, destination_storage_type="local", source_file_path=s3_source_path, - source_storage_type="s3" + source_storage_type="s3", ) # def copy_s3_to_s3(self, source_s3_path: str, destination_s3_path: str) -> requests.Response: # """ # Convenience method to copy files between S3 locations. - + # Args: # source_s3_path: Source S3 path # destination_s3_path: Destination S3 path - + # Returns: # Response object from the S3Ferry service # """ @@ -144,4 +163,4 @@ def download_from_s3(self, s3_source_path: str, local_destination_path: str) -> # destination_storage_type="s3", # source_file_path=source_s3_path, # source_storage_type="s3" - # ) \ No newline at end of file + # ) diff --git a/src/dataset_file_hanlder/single_chunk_handler.py b/src/dataset_file_hanlder/single_chunk_handler.py index 5760e8d7..5e8efa9b 100644 --- a/src/dataset_file_hanlder/single_chunk_handler.py +++ b/src/dataset_file_hanlder/single_chunk_handler.py @@ -12,30 +12,32 @@ class ChunkService: """Service class for handling single chunk operations.""" - + def __init__(self): """Initialize the chunk service.""" self.s3_ferry_service = S3Ferry() logger.info("ChunkService initialized") - + def download_chunk_from_s3(self, dataset_id: str, page_num: int) -> Dict[str, Any]: """ Download a specific chunk from S3 bucket. 
- + Args: dataset_id: Dataset ID page_num: Page number (chunk number) - + Returns: Dictionary containing chunk data or error information """ try: - logger.info(f"Starting chunk download - Dataset ID: {dataset_id}, Page: {page_num}") - + logger.info( + f"Starting chunk download - Dataset ID: {dataset_id}, Page: {page_num}" + ) + # Create temporary directory for download temp_dir = tempfile.mkdtemp(prefix="chunk_download_") logger.info(f"Created temporary directory: {temp_dir}") - + # Define S3 source path and local destination chunk_filename = f"{page_num}.json" s3_source_path = f"{dataset_id}/{chunk_filename}" @@ -45,48 +47,48 @@ def download_chunk_from_s3(self, dataset_id: str, page_num: int) -> Dict[str, An temp_chunks_dir = "temp_chunks" os.makedirs(temp_chunks_dir, exist_ok=True) logger.info(f"Created/verified temp directory: {temp_chunks_dir}") - + logger.info(f"S3 source path: {s3_source_path}") logger.info(f"Local destination: {local_dest_path}") - + # Download chunk from S3 using S3Ferry service response = self.s3_ferry_service.transfer_file( destination_file_path=local_dest_path, destination_storage_type="FS", source_file_path=s3_source_path, - source_storage_type="S3" + source_storage_type="S3", ) - + logger.info(f"S3Ferry response status: {response.status_code}") logger.info(f"S3Ferry response body: {response.text}") - + if response.status_code in [200, 201]: # Read the downloaded chunk file local_file_path = f"/app/{local_dest_path}" - + if os.path.exists(local_file_path): logger.info(f"Successfully downloaded chunk to: {local_file_path}") - + # Read and parse the chunk data - with open(local_file_path, 'r', encoding='utf-8') as f: + with open(local_file_path, "r", encoding="utf-8") as f: chunk_data = json.load(f) - + # Clean up the downloaded file os.remove(local_file_path) logger.info(f"Cleaned up downloaded file: {local_file_path}") - + # Remove empty directory if it exists try: os.rmdir(os.path.dirname(local_file_path)) except OSError: pass # Directory not empty or doesn't exist - + return { "success": True, "dataset_id": dataset_id, "page_num": page_num, "chunk_data": chunk_data, - "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}" + "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}", } else: return { @@ -94,7 +96,7 @@ def download_chunk_from_s3(self, dataset_id: str, page_num: int) -> Dict[str, An "dataset_id": dataset_id, "page_num": page_num, "error": f"Downloaded file not found at: {local_file_path}", - "message": "File download completed but file not accessible" + "message": "File download completed but file not accessible", } else: return { @@ -103,9 +105,9 @@ def download_chunk_from_s3(self, dataset_id: str, page_num: int) -> Dict[str, An "page_num": page_num, "error": f"S3 download failed: HTTP {response.status_code}", "response_body": response.text, - "message": f"Failed to download chunk {page_num} from S3" + "message": f"Failed to download chunk {page_num} from S3", } - + except Exception as e: logger.error(f"Error during chunk download: {str(e)}") traceback.print_exc() @@ -114,5 +116,5 @@ def download_chunk_from_s3(self, dataset_id: str, page_num: int) -> Dict[str, An "dataset_id": dataset_id, "page_num": page_num, "error": str(e), - "message": "Internal error during chunk download" - } \ No newline at end of file + "message": "Internal error during chunk download", + } diff --git a/src/s3_dataset_processor/dataset_generation_callback_processor.py 
b/src/s3_dataset_processor/dataset_generation_callback_processor.py index b73ea390..dd05a28c 100644 --- a/src/s3_dataset_processor/dataset_generation_callback_processor.py +++ b/src/s3_dataset_processor/dataset_generation_callback_processor.py @@ -3,6 +3,7 @@ Standalone script for processing dataset generation callbacks. Replaces the FastAPI background task with direct synchronous execution. """ + import sys import json import argparse @@ -16,14 +17,14 @@ # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stdout)] + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger(__name__) # Add the s3_dataset_processor to Python path to import modules -script_dir = Path('/app/src/s3_dataset_processor') +script_dir = Path("/app/src/s3_dataset_processor") sys.path.insert(0, str(script_dir)) logger.info(f"🔍 Script directory: {script_dir}") @@ -33,6 +34,7 @@ try: from services.url_decoder_service import URLDecoderService from services.s3_ferry_service import S3Ferry + url_decoder_service = URLDecoderService() s3_ferry_service = S3Ferry() logger.info("✅ Successfully imported URLDecoderService") @@ -43,56 +45,59 @@ if script_dir.exists(): logger.error(f"Contents of script directory: {list(script_dir.iterdir())}") # Try to check services directory - services_dir = script_dir / 'services' + services_dir = script_dir / "services" if services_dir.exists(): - logger.error(f"Contents of services directory: {list(services_dir.iterdir())}") + logger.error( + f"Contents of services directory: {list(services_dir.iterdir())}" + ) traceback.print_exc() sys.exit(1) + def chunk_dataset(dataset_path: str, chunk_size: int = 5): """ Chunk the generated dataset into smaller files with specified number of records. - + Args: dataset_path: Path to the generated dataset JSON file chunk_size: Number of records per chunk (default: 5) - + Returns: List of chunk file paths created """ try: logger.info(f"[CHUNKING] Starting chunking for: {dataset_path}") - + # Read the original dataset - with open(dataset_path, 'r', encoding='utf-8') as f: + with open(dataset_path, "r", encoding="utf-8") as f: dataset = json.load(f) - - aggregated_data = dataset.get('aggregated_data', []) + + aggregated_data = dataset.get("aggregated_data", []) total_items = len(aggregated_data) - + logger.info(f"[CHUNKING] Total items to chunk: {total_items}") logger.info(f"[CHUNKING] Chunk size: {chunk_size}") - + if total_items == 0: logger.warning("[CHUNKING] No data to chunk") return [] - + # Create chunks directory dataset_name = Path(dataset_path).stem chunks_dir = Path(dataset_path).parent / f"{dataset_name}_chunks" chunks_dir.mkdir(exist_ok=True) - + chunk_files = [] - + # Create chunks with incremental naming (1.json, 2.json, etc.) for i in range(0, total_items, chunk_size): - chunk_data = aggregated_data[i:i + chunk_size] + chunk_data = aggregated_data[i : i + chunk_size] chunk_number = (i // chunk_size) + 1 - + # Use simple incremental naming: 1.json, 2.json, 3.json, etc. 
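+            # Worked example for the fields set below (assuming 12 items and
+            # chunk_size=5): i takes 0, 5, 10, so chunk_number is 1, 2, 3;
+            # total_chunks = (12 + 5 - 1) // 5 = 3; and chunk_range covers
+            # "1-5", "6-10", "11-12".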
chunk_filename = f"{chunk_number}.json" chunk_path = chunks_dir / chunk_filename - + # Create chunk with metadata chunk_content = { "chunk_info": { @@ -100,71 +105,80 @@ def chunk_dataset(dataset_path: str, chunk_size: int = 5): "chunk_number": chunk_number, "total_chunks": (total_items + chunk_size - 1) // chunk_size, "items_in_chunk": len(chunk_data), - "chunk_range": f"{i + 1}-{min(i + chunk_size, total_items)}" + "chunk_range": f"{i + 1}-{min(i + chunk_size, total_items)}", }, - "data": chunk_data + "data": chunk_data, } - + # Write chunk file - with open(chunk_path, 'w', encoding='utf-8') as f: + with open(chunk_path, "w", encoding="utf-8") as f: json.dump(chunk_content, f, indent=2, ensure_ascii=False) - + chunk_files.append(str(chunk_path)) - logger.info(f"[CHUNKING] Created chunk {chunk_number}: {chunk_filename} ({len(chunk_data)} items)") - - logger.info(f"[CHUNKING] ✅ Created {len(chunk_files)} chunk files in: {chunks_dir}") + logger.info( + f"[CHUNKING] Created chunk {chunk_number}: {chunk_filename} ({len(chunk_data)} items)" + ) + + logger.info( + f"[CHUNKING] ✅ Created {len(chunk_files)} chunk files in: {chunks_dir}" + ) return chunk_files - + except Exception as e: logger.error(f"[CHUNKING] ❌ Error during chunking: {str(e)}") traceback.print_exc() raise + def upload_chunks_to_s3(chunk_files: list, dataset_id: str): """ Upload chunk files to S3 using S3Ferry service. - + Args: chunk_files: List of chunk file paths to upload dataset_id: Dataset ID for organizing uploads - + Returns: List of upload results with S3 URLs """ try: - logger.info(f"[S3_UPLOAD] Starting S3 upload for {len(chunk_files)} chunks using S3Ferry service") - + logger.info( + f"[S3_UPLOAD] Starting S3 upload for {len(chunk_files)} chunks using S3Ferry service" + ) + upload_results = [] - + for chunk_file in chunk_files: # Extract just the filename (e.g., "1.json", "2.json") chunk_filename = Path(chunk_file).name - + logger.info(f"[S3_UPLOAD] Processing chunk file: {chunk_file}") logger.info(f"[S3_UPLOAD] Chunk filename: {chunk_filename}") - + # Create the exact payload format you specified # destinationFilePath: "/{dataset_id}/{filename}" (e.g., "/3/2.json") destination_file_path = f"/{dataset_id}/{chunk_filename}" - + # sourceFilePath: relative path from gc-s3-ferry's volume mount (e.g., "output_datasets/3_chunks/2.json") source_file_path = f"output_datasets/{dataset_id}_chunks/{chunk_filename}" - + logger.info(f"[S3_UPLOAD] Destination path: {destination_file_path}") logger.info(f"[S3_UPLOAD] Source path: {source_file_path}") - + try: # Use S3Ferry service with exact payload format response = s3_ferry_service.transfer_file( - destination_file_path=destination_file_path, # "/3/2.json" - destination_storage_type="S3", # "S3" - source_file_path=source_file_path, # "output_datasets/3_chunks/2.json" - source_storage_type="FS" # "FS" + destination_file_path=destination_file_path, # "/3/2.json" + destination_storage_type="S3", # "S3" + source_file_path=source_file_path, # "output_datasets/3_chunks/2.json" + source_storage_type="FS", # "FS" + ) + + logger.info( + f"[S3_UPLOAD] S3Ferry response status: {response.status_code}" ) - - logger.info(f"[S3_UPLOAD] S3Ferry response status: {response.status_code}") logger.info(f"[S3_UPLOAD] S3Ferry response body: {response.text}") - + # Accept both 200 (OK) and 201 (Created) as success if response.status_code in [200, 201]: # Parse response if needed @@ -173,135 +187,170 @@ def upload_chunks_to_s3(chunk_files: list, dataset_id: str): response_data = response.json() if 
response.text else {}
                except Exception:
                    pass
-
-
-                    upload_results.append({
-                        "chunk_file": chunk_filename,
-                        "destination_path": destination_file_path,
-                        "source_path": source_file_path,
-                        "success": True,
-                        "response": response_data,
-                        "status_code": response.status_code
-                    })
-                    logger.info(f"[S3_UPLOAD] ✅ Uploaded: {chunk_filename} -> (HTTP {response.status_code})")
+
+                    upload_results.append(
+                        {
+                            "chunk_file": chunk_filename,
+                            "destination_path": destination_file_path,
+                            "source_path": source_file_path,
+                            "success": True,
+                            "response": response_data,
+                            "status_code": response.status_code,
+                        }
+                    )
+                    logger.info(
+                        f"[S3_UPLOAD] ✅ Uploaded: {chunk_filename} -> (HTTP {response.status_code})"
+                    )
                 else:
-                    upload_results.append({
+                    upload_results.append(
+                        {
+                            "chunk_file": chunk_filename,
+                            "error": f"HTTP {response.status_code}: {response.text}",
+                            "success": False,
+                            "source_path": source_file_path,
+                            "status_code": response.status_code,
+                        }
+                    )
+                    logger.error(
+                        f"[S3_UPLOAD] ❌ Failed to upload {chunk_filename}: HTTP {response.status_code}"
+                    )
+                    logger.error(f"[S3_UPLOAD] Response: {response.text}")
+
+            except requests.exceptions.RequestException as e:
+                upload_results.append(
+                    {
                         "chunk_file": chunk_filename,
-                        "error": f"HTTP {response.status_code}: {response.text}",
+                        "error": str(e),
                         "success": False,
                         "source_path": source_file_path,
-                        "status_code": response.status_code
-                    })
-                    logger.error(f"[S3_UPLOAD] ❌ Failed to upload {chunk_filename}: HTTP {response.status_code}")
-                    logger.error(f"[S3_UPLOAD] Response: {response.text}")
-
-            except requests.exceptions.RequestException as e:
-                upload_results.append({
-                    "chunk_file": chunk_filename,
-                    "error": str(e),
-                    "success": False,
-                    "source_path": source_file_path
-                })
-                logger.error(f"[S3_UPLOAD] ❌ Request failed for {chunk_filename}: {str(e)}")
+                    }
+                )
+                logger.error(
+                    f"[S3_UPLOAD] ❌ Request failed for {chunk_filename}: {str(e)}"
+                )
                 traceback.print_exc()
-
+
             except Exception as e:
-                upload_results.append({
-                    "chunk_file": chunk_filename,
-                    "error": str(e),
-                    "success": False,
-                    "source_path": source_file_path
-                })
-                logger.error(f"[S3_UPLOAD] ❌ Unexpected error for {chunk_filename}: {str(e)}")
+                upload_results.append(
+                    {
+                        "chunk_file": chunk_filename,
+                        "error": str(e),
+                        "success": False,
+                        "source_path": source_file_path,
+                    }
+                )
+                logger.error(
+                    f"[S3_UPLOAD] ❌ Unexpected error for {chunk_filename}: {str(e)}"
+                )
                 traceback.print_exc()
-
-        successful_uploads = [r for r in upload_results if r.get('success', False)]
-        failed_uploads = [r for r in upload_results if not r.get('success', False)]
-
-        logger.info(f"[S3_UPLOAD] ✅ Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed")
-
+
+        successful_uploads = [r for r in upload_results if r.get("success", False)]
+        failed_uploads = [r for r in upload_results if not r.get("success", False)]
+
+        logger.info(
+            f"[S3_UPLOAD] ✅ Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed"
+        )
+
        # Log detailed results
        if successful_uploads:
            logger.info("[S3_UPLOAD] 📊 Successful uploads:")
            for result in successful_uploads:
-                status_code = result.get('status_code', 'unknown')
-                logger.info(f"[S3_UPLOAD] - {result['chunk_file']}: {result['s3_url']} (HTTP {status_code})")
+                status_code = result.get("status_code", "unknown")
+                logger.info(
+                    f"[S3_UPLOAD] - {result['chunk_file']}: {result['destination_path']} (HTTP {status_code})"
+                )
+
        if failed_uploads:
            logger.warning("[S3_UPLOAD] ⚠️ Failed uploads:")
            for result in failed_uploads:
-
logger.warning(f"[S3_UPLOAD] - {result['chunk_file']}: {result['error']}") - + logger.warning( + f"[S3_UPLOAD] - {result['chunk_file']}: {result['error']}" + ) + return upload_results - + except Exception as e: logger.error(f"[S3_UPLOAD] ❌ Error during S3 upload: {str(e)}") traceback.print_exc() raise + def update_chunk_metadata(chunk_file_path: str, dataset_id: str, chunk_number: int): """ Update chunk metadata in the database after successful S3 upload. - + Args: chunk_file_path: Path to the chunk file to extract agency information - dataset_id: Dataset ID + dataset_id: Dataset ID chunk_number: Chunk number (1, 2, 3, etc.) - + Returns: Response from the metadata update endpoint """ try: - logger.info(f"[CHUNK_METADATA] Updating metadata for chunk {chunk_number} of dataset {dataset_id}") - + logger.info( + f"[CHUNK_METADATA] Updating metadata for chunk {chunk_number} of dataset {dataset_id}" + ) + # Read the chunk file to extract agency information - with open(chunk_file_path, 'r', encoding='utf-8') as f: + with open(chunk_file_path, "r", encoding="utf-8") as f: chunk_data = json.load(f) - + # Extract agency IDs from the chunk data - chunk_items = chunk_data.get('data', []) + chunk_items = chunk_data.get("data", []) agency_ids = [] - + for item in chunk_items: - agency_id = item.get('agency_id', 'unknown') + agency_id = item.get("agency_id", "unknown") agency_ids.append(agency_id) - - logger.info(f"[CHUNK_METADATA] Extracted {len(agency_ids)} agency IDs: {agency_ids}") - + + logger.info( + f"[CHUNK_METADATA] Extracted {len(agency_ids)} agency IDs: {agency_ids}" + ) + # Create the payload for the metadata endpoint metadata_payload = { "datasetId": int(dataset_id), "chunkId": chunk_number, - "includedAgencies": json.dumps(agency_ids) # Convert array to JSON string + "includedAgencies": json.dumps(agency_ids), # Convert array to JSON string } - - logger.info(f"[CHUNK_METADATA] Payload: {json.dumps(metadata_payload, indent=2)}") - + + logger.info( + f"[CHUNK_METADATA] Payload: {json.dumps(metadata_payload, indent=2)}" + ) + # Send POST request to the chunk metadata endpoint - CHUNK_METADATA_URL = "http://resql:8082/global-classifier/update-data-chunk-metadata" - + CHUNK_METADATA_URL = ( + "http://resql:8082/global-classifier/update-data-chunk-metadata" + ) + response = requests.post( CHUNK_METADATA_URL, json=metadata_payload, headers={"Content-Type": "application/json"}, - timeout=30 + timeout=30, ) - + logger.info(f"[CHUNK_METADATA] Response status: {response.status_code}") logger.info(f"[CHUNK_METADATA] Response body: {response.text}") - + if response.status_code == 200: - logger.info(f"[CHUNK_METADATA] ✅ Successfully updated metadata for chunk {chunk_number}") + logger.info( + f"[CHUNK_METADATA] ✅ Successfully updated metadata for chunk {chunk_number}" + ) else: - logger.warning(f"[CHUNK_METADATA] ⚠️ Metadata update failed for chunk {chunk_number}: HTTP {response.status_code}") - + logger.warning( + f"[CHUNK_METADATA] ⚠️ Metadata update failed for chunk {chunk_number}: HTTP {response.status_code}" + ) + return response - + except Exception as e: logger.error(f"[CHUNK_METADATA] ❌ Error updating chunk metadata: {str(e)}") traceback.print_exc() raise + def process_callback_background(file_path: str, encoded_results: str): """ Process the dataset generation callback with chunking and S3 upload. 
@@ -325,94 +374,125 @@ def process_callback_background(file_path: str, encoded_results: str):
         if os.path.exists(full_dataset_path):
             logger.info(f"[CALLBACK] Found dataset file: {full_dataset_path}")
             # Step 2.1: Upload the original aggregated dataset file
-            logger.info("[CALLBACK] 📤 Starting upload of original aggregated dataset...")
+            logger.info(
+                "[CALLBACK] 📤 Starting upload of original aggregated dataset..."
+            )
             try:
                 # Upload the original dataset file to S3
                 original_dataset_response = s3_ferry_service.transfer_file(
                     destination_file_path=f"/{dataset_id}/aggregated_dataset.json",  # "/3/aggregated_dataset.json"
                     destination_storage_type="S3",
-                    source_file_path=f"output_datasets/{dataset_id}.json", # "output_datasets/3.json"
-                    source_storage_type="FS"
+                    source_file_path=f"output_datasets/{dataset_id}.json",  # "output_datasets/3.json"
+                    source_storage_type="FS",
                 )
-
-                logger.info(f"[CALLBACK] Original dataset upload status: {original_dataset_response.status_code}")
-                logger.info(f"[CALLBACK] Original dataset upload response: {original_dataset_response.text}")
-
+
+                logger.info(
+                    f"[CALLBACK] Original dataset upload status: {original_dataset_response.status_code}"
+                )
+                logger.info(
+                    f"[CALLBACK] Original dataset upload response: {original_dataset_response.text}"
+                )
+
                 if original_dataset_response.status_code in [200, 201]:
-                    original_s3_url = f"s3://global-classifier/{dataset_id}/aggregated_dataset.json"
-                    logger.info(f"[CALLBACK] ✅ Original dataset uploaded: {original_s3_url}")
-
+                    original_s3_url = (
+                        f"s3://global-classifier/{dataset_id}/aggregated_dataset.json"
+                    )
+                    logger.info(
+                        f"[CALLBACK] ✅ Original dataset uploaded: {original_s3_url}"
+                    )
+
                 else:
-                    logger.error(f"[CALLBACK] ❌ Failed to upload original dataset: HTTP {original_dataset_response.status_code}")
-
-
+                    logger.error(
+                        f"[CALLBACK] ❌ Failed to upload original dataset: HTTP {original_dataset_response.status_code}"
+                    )
+
             except Exception as e:
-                logger.error(f"[CALLBACK] ❌ Error uploading original dataset: {str(e)}")
+                logger.error(
+                    f"[CALLBACK] ❌ Error uploading original dataset: {str(e)}"
+                )
                 traceback.print_exc()

             # Step 2.2: Chunk the generated dataset
             chunk_files = chunk_dataset(full_dataset_path, chunk_size=5)
-
+
             # Step 3: Upload chunks to S3
             if chunk_files:
                 upload_results = upload_chunks_to_s3(chunk_files, dataset_id)
-
+
                 # Log upload summary
-                successful_uploads = [r for r in upload_results if r.get('success', False)]
-                logger.info(f"[CALLBACK] 📊 S3 Upload Summary:")
+                successful_uploads = [
+                    r for r in upload_results if r.get("success", False)
+                ]
+                logger.info("[CALLBACK] 📊 S3 Upload Summary:")
                 logger.info(f"[CALLBACK] - Total chunks: {len(chunk_files)}")
-                logger.info(f"[CALLBACK] - Successful uploads: {len(successful_uploads)}")
-                logger.info(f"[CALLBACK] - Failed uploads: {len(upload_results) - len(successful_uploads)}")
-
+                logger.info(
+                    f"[CALLBACK] - Successful uploads: {len(successful_uploads)}"
+                )
+                logger.info(
+                    f"[CALLBACK] - Failed uploads: {len(upload_results) - len(successful_uploads)}"
+                )
+
                 # Log S3 URLs
                 for result in successful_uploads:
-                    logger.info(f"[CALLBACK] - {result['chunk_file']}: {result['s3_url']}")
+                    logger.info(
+                        f"[CALLBACK] - {result['chunk_file']}: {result['destination_path']}"
+                    )

                 # Step 3.5: Update chunk metadata for successfully uploaded chunks
                 logger.info("[CALLBACK] 🔄 Starting chunk metadata updates...")
                 metadata_results = []
                 for i, chunk_file in enumerate(chunk_files):
                     chunk_number = i + 1  # Chunks are numbered 1, 2, 3, etc.
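+                    # The membership test below pairs each chunk with its upload
+                    # result; a set built once above the loop would avoid rescanning
+                    # upload_results for every chunk (a sketch; assumes each result
+                    # dict carries "chunk_file" and "success" keys, as built above):
+                    #   uploaded_ok = {r["chunk_file"] for r in upload_results if r["success"]}
+                    #   upload_success = chunk_filename in uploaded_ok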
- + # Only update metadata for successfully uploaded chunks chunk_filename = Path(chunk_file).name upload_success = any( - result['chunk_file'] == chunk_filename and result['success'] + result["chunk_file"] == chunk_filename and result["success"] for result in upload_results ) - + if upload_success: try: metadata_response = update_chunk_metadata( chunk_file_path=chunk_file, dataset_id=dataset_id, - chunk_number=chunk_number + chunk_number=chunk_number, ) - - metadata_results.append({ - "chunk_number": chunk_number, - "chunk_file": chunk_filename, - "success": metadata_response.status_code == 200, - "response": metadata_response.text - }) - + + metadata_results.append( + { + "chunk_number": chunk_number, + "chunk_file": chunk_filename, + "success": metadata_response.status_code == 200, + "response": metadata_response.text, + } + ) + except Exception as e: - logger.error(f"[CALLBACK] ❌ Failed to update metadata for chunk {chunk_number}: {str(e)}") - metadata_results.append({ + logger.error( + f"[CALLBACK] ❌ Failed to update metadata for chunk {chunk_number}: {str(e)}" + ) + metadata_results.append( + { + "chunk_number": chunk_number, + "chunk_file": chunk_filename, + "success": False, + "error": str(e), + } + ) + else: + logger.warning( + f"[CALLBACK] ⚠️ Skipping metadata update for chunk {chunk_number} (upload failed)" + ) + metadata_results.append( + { "chunk_number": chunk_number, "chunk_file": chunk_filename, "success": False, - "error": str(e) - }) - else: - logger.warning(f"[CALLBACK] ⚠️ Skipping metadata update for chunk {chunk_number} (upload failed)") - metadata_results.append({ - "chunk_number": chunk_number, - "chunk_file": chunk_filename, - "success": False, - "error": "Upload failed" - }) - + "error": "Upload failed", + } + ) + else: logger.warning("[CALLBACK] No chunks created, skipping S3 upload") else: @@ -516,56 +596,62 @@ def process_callback_background(file_path: str, encoded_results: str): traceback.print_exc() raise + def main(): """Main function to handle callback processing.""" - parser = argparse.ArgumentParser(description='Process dataset generation callback') - parser.add_argument('--file-path', required=True, help='File path of the generated dataset') - parser.add_argument('--encoded-results', required=True, help='Encoded results string') - parser.add_argument('--output-json', help='Output JSON file path for response') - + parser = argparse.ArgumentParser(description="Process dataset generation callback") + parser.add_argument( + "--file-path", required=True, help="File path of the generated dataset" + ) + parser.add_argument( + "--encoded-results", required=True, help="Encoded results string" + ) + parser.add_argument("--output-json", help="Output JSON file path for response") + args = parser.parse_args() - + try: logger.info("🔄 Starting callback processing...") logger.info(f"File path: {args.file_path}") logger.info(f"Encoded results length: {len(args.encoded_results)} characters") - + # Process the callback directly (synchronous execution) process_callback_background(args.file_path, args.encoded_results) - + # Create response response = { "message": "Callback processing completed successfully", "status": "completed", - "file_path": args.file_path + "file_path": args.file_path, } - + # Output response to file if specified if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(response, f, indent=2) logger.info(f"✅ Response written to: {args.output_json}") - + # Also output to stdout for shell script 
print(json.dumps(response)) - + logger.info("✅ Callback processing completed successfully") - + except Exception as e: logger.error(f"❌ Error processing callback: {str(e)}") traceback.print_exc() error_response = { "message": f"Callback processing failed: {str(e)}", "status": "error", - "file_path": args.file_path + "file_path": args.file_path, } - + if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(error_response, f, indent=2) - + print(json.dumps(error_response)) sys.exit(1) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/s3_dataset_processor/dataset_generation_callback_processor_v1.py b/src/s3_dataset_processor/dataset_generation_callback_processor_v1.py deleted file mode 100644 index 07fa52fb..00000000 --- a/src/s3_dataset_processor/dataset_generation_callback_processor_v1.py +++ /dev/null @@ -1,201 +0,0 @@ -""" -Standalone script for processing dataset generation callbacks. -Replaces the FastAPI background task with direct synchronous execution. -""" -import sys -import json -import argparse -import logging -import re -import requests -from pathlib import Path -import traceback - -# Configure logging -logging.basicConfig( - level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stdout)] -) -logger = logging.getLogger(__name__) - -# Add the s3_dataset_processor to Python path to import modules -script_dir = Path('/app/src/s3_dataset_processor') -sys.path.insert(0, str(script_dir)) - -try: - from services.url_decoder_service import URLDecoderService - url_decoder_service = URLDecoderService() - logger.info("✅ Successfully imported URLDecoderService") -except ImportError as e: - logger.error(f"❌ Failed to import URLDecoderService: {e}") - traceback.print_exc() - sys.exit(1) - -def process_callback_background(file_path: str, encoded_results: str): - """ - Process the dataset generation callback. - This is the same function from s3_processor_api.py but now runs synchronously. 
- """ - try: - print(f"[CALLBACK] Starting processing for: {file_path}") - - # Extract dataset ID from file path (e.g., output_datasets/single_question/12.json -> 12) - dataset_id_match = re.search(r"/([^/]+)\.json$", file_path) - dataset_id = dataset_id_match.group(1) if dataset_id_match else "unknown" - - logger.info(f"[CALLBACK] Extracted dataset ID: {dataset_id}") - - # Decode the results using the existing service - decoded_results = url_decoder_service.decode_signed_urls(encoded_results) - - logger.info(f"[CALLBACK] Decoded {len(decoded_results)} results") - - # Process the decoded results to create the required payload - agencies = [] - overall_success = True - - for i, result in enumerate(decoded_results): - # Extract agency_id from dataset_metadata - dataset_metadata = result.get("dataset_metadata", {}) - agency_id = dataset_metadata.get("agency_id", "unknown") - success = result.get("success", False) - - sync_status = "Synced_with_CKB" if success else "Sync_with_CKB_Failed" - - agencies.append({"agencyId": agency_id, "syncStatus": sync_status}) - - logger.info( - f"[CALLBACK] Agency {i + 1}: ID={agency_id}, Success={success}, Status={sync_status}" - ) - - if not success: - overall_success = False - - generation_status = ( - "Generation_Success" if overall_success else "Generation_Failed" - ) - - # Create the exact payload format requested - callback_payload = { - "agencies": agencies, - "datasetId": dataset_id, - "generationStatus": generation_status, - } - - # Log the processed callback - logger.info(f"[CALLBACK] {json.dumps(callback_payload, indent=2)}") - logger.info(f"[CALLBACK] Dataset ID: {dataset_id}") - logger.info(f"[CALLBACK] Generation Status: {generation_status}") - logger.info(f"[CALLBACK] Total Agencies: {len(agencies)}") - logger.info( - f"[CALLBACK] Successful Agencies: {len([a for a in agencies if a['syncStatus'] == 'Synced_with_CKB'])}" - ) - logger.info( - f"[CALLBACK] Failed Agencies: {len([a for a in agencies if a['syncStatus'] == 'Sync_with_CKB_Failed'])}" - ) - - STATUS_UPDATE_URL = ( - "http://ruuter-public:8086/global-classifier/agencies/data/generation" - ) - - logger.info(f"[CALLBACK] Sending callback payload to: {STATUS_UPDATE_URL}") - - try: - # Send POST request to the status update endpoint - response = requests.post( - STATUS_UPDATE_URL, - json=callback_payload, - headers={"Content-Type": "application/json"}, - timeout=30, - ) - - logger.info( - f"[CALLBACK] Status update response - HTTP Status: {response.status_code}" - ) - logger.info(f"[CALLBACK] Status update response body: {response.text}") - - if response.status_code == 200: - logger.info( - "[CALLBACK] ✅ Successfully sent callback payload to status update endpoint" - ) - else: - logger.warning( - f"[CALLBACK] ⚠️ Status update endpoint returned non-200 status: {response.status_code}" - ) - logger.info(f"[CALLBACK] Response: {response.text}") - - except requests.exceptions.RequestException as webhook_error: - logger.error( - f"[CALLBACK] ❌ Error sending callback to status update endpoint: {str(webhook_error)}" - ) - logger.debug(f"[CALLBACK] URL: {STATUS_UPDATE_URL}") - logger.debug( - f"[CALLBACK] Payload: {json.dumps(callback_payload, indent=2)}" - ) - - except Exception as unexpected_error: - logger.error( - f"[CALLBACK] ❌ Unexpected error during status update: {str(unexpected_error)}" - ) - - logger.info("[CALLBACK] Processing completed successfully") - - except Exception as e: - logger.error(f"[CALLBACK] Error in processing: {str(e)}") - raise - -def main(): - """Main function to handle 
callback processing.""" - parser = argparse.ArgumentParser(description='Process dataset generation callback') - parser.add_argument('--file-path', required=True, help='File path of the generated dataset') - parser.add_argument('--encoded-results', required=True, help='Encoded results string') - parser.add_argument('--output-json', help='Output JSON file path for response') - - args = parser.parse_args() - - try: - logger.info("🔄 Starting callback processing...") - logger.info(f"File path: {args.file_path}") - logger.info(f"Encoded results length: {len(args.encoded_results)} characters") - - # Process the callback directly (synchronous execution) - process_callback_background(args.file_path, args.encoded_results) - - # Create response - response = { - "message": "Callback processing completed successfully", - "status": "completed", - "file_path": args.file_path - } - - # Output response to file if specified - if args.output_json: - with open(args.output_json, 'w') as f: - json.dump(response, f, indent=2) - logger.info(f"✅ Response written to: {args.output_json}") - - # Also output to stdout for shell script - print(json.dumps(response)) - - logger.info("✅ Callback processing completed successfully") - - except Exception as e: - logger.error(f"❌ Error processing callback: {str(e)}") - error_response = { - "message": f"Callback processing failed: {str(e)}", - "status": "error", - "file_path": args.file_path - } - - if args.output_json: - with open(args.output_json, 'w') as f: - json.dump(error_response, f, indent=2) - - print(json.dumps(error_response)) - traceback.print_exc() - sys.exit(1) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/src/s3_dataset_processor/download_source_dataset.py b/src/s3_dataset_processor/download_source_dataset.py index eeab901c..bc061457 100644 --- a/src/s3_dataset_processor/download_source_dataset.py +++ b/src/s3_dataset_processor/download_source_dataset.py @@ -4,6 +4,7 @@ Direct Python script for downloading datasets from S3 signed URLs. Replaces the FastAPI /download-datasets endpoint for CronManager execution. 
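A minimal invocation sketch (the flag names come from the argparse block below; the encoded payload shape and the paths are illustrative assumptions, since the real payload is produced upstream and decoded by the download services):

    import base64, json, subprocess

    # Hypothetical signed-URL payload; the actual structure is an assumption here.
    encoded = base64.b64encode(
        json.dumps([{"signedUrl": "https://example.invalid/file.zip"}]).encode()
    ).decode()

    proc = subprocess.run(
        ["python", "/app/src/s3_dataset_processor/download_source_dataset.py",
         "--encoded-data", encoded,
         "--output-json", "/tmp/download_result.json"],
        capture_output=True, text=True,
    )
    print(proc.returncode)  # non-zero exit signals a failed run, per main() below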
""" + import sys import json import argparse @@ -14,15 +15,15 @@ # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stdout)] + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger(__name__) # Add the s3_dataset_processor to Python path to import modules FIRST # This path corresponds to the volume mount in docker-compose.yml -script_dir = Path('/app/src/s3_dataset_processor') +script_dir = Path("/app/src/s3_dataset_processor") sys.path.insert(0, str(script_dir)) # Now import the services AFTER adding to path @@ -31,6 +32,7 @@ from services.download_service import DownloadService from services.extraction_service import ExtractionService from handlers.response_handler import ResponseHandler + logger.info("✅ Successfully imported all required modules") except ImportError as e: logger.error(f"❌ Failed to import required modules: {e}") @@ -40,15 +42,25 @@ logger.error(f"Contents of script directory: {list(script_dir.iterdir())}") sys.exit(1) + def main(): """Main function to handle dataset download process.""" - parser = argparse.ArgumentParser(description='Download datasets from S3 signed URLs') - parser.add_argument('--encoded-data', required=True, help='Base64 encoded signed URLs data') - parser.add_argument('--extract-files', action='store_true', default=True, help='Extract downloaded files') - parser.add_argument('--output-json', help='Output file path for results JSON') - + parser = argparse.ArgumentParser( + description="Download datasets from S3 signed URLs" + ) + parser.add_argument( + "--encoded-data", required=True, help="Base64 encoded signed URLs data" + ) + parser.add_argument( + "--extract-files", + action="store_true", + default=True, + help="Extract downloaded files", + ) + parser.add_argument("--output-json", help="Output file path for results JSON") + args = parser.parse_args() - + try: if not args.encoded_data or not isinstance(args.encoded_data, str): logger.error("'encoded_data' must be a non-empty string") @@ -90,14 +102,16 @@ def main(): # Output results if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(response.dict(), f, indent=2) logger.info(f"Results written to {args.output_json}") else: print(json.dumps(response.dict(), indent=2)) # Log summary - logger.info(f"Download completed: {successful_downloads} successful, {failed_downloads} failed") + logger.info( + f"Download completed: {successful_downloads} successful, {failed_downloads} failed" + ) logger.info(f"Extracted folders: {len(extracted_folders)}") # Exit with appropriate code @@ -112,5 +126,6 @@ def main(): traceback.print_exc() sys.exit(1) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/s3_dataset_processor/fetch_chunk_without_filter.py b/src/s3_dataset_processor/fetch_chunk_without_filter.py index 2aa5e54d..44814d4a 100644 --- a/src/s3_dataset_processor/fetch_chunk_without_filter.py +++ b/src/s3_dataset_processor/fetch_chunk_without_filter.py @@ -3,6 +3,7 @@ Python script to download a specific chunk from S3 bucket and return as JSON. Used by CronManager endpoint to fetch individual chunks. 
""" + import sys import json import argparse @@ -15,46 +16,50 @@ # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stderr)] # Log to stderr to keep stdout clean + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stderr)], # Log to stderr to keep stdout clean ) logger = logging.getLogger(__name__) # Add the s3_dataset_processor to Python path to import modules -script_dir = Path('/app/src/s3_dataset_processor') +script_dir = Path("/app/src/s3_dataset_processor") sys.path.insert(0, str(script_dir)) + def log(message): """Log to stderr to keep stdout clean for JSON output""" logger.info(f"🔍 [CHUNK DOWNLOAD] {message}") + try: from services.s3_ferry_service import S3Ferry + s3_ferry_service = S3Ferry() log("Successfully imported S3FerryService") except ImportError as e: log(f"Failed to import S3FerryService: {e}") sys.exit(1) + def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: """ Download a specific chunk from S3 bucket. - + Args: dataset_id: Dataset ID page_num: Page number (chunk number) - + Returns: Dictionary containing chunk data or error information """ try: log(f"Starting chunk download - Dataset ID: {dataset_id}, Page: {page_num}") - + # Create temporary directory for download temp_dir = tempfile.mkdtemp(prefix="chunk_download_") log(f"Created temporary directory: {temp_dir}") - + # Define S3 source path and local destination chunk_filename = f"{page_num}.json" s3_source_path = f"{dataset_id}/{chunk_filename}" @@ -64,48 +69,48 @@ def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: temp_chunks_dir = "temp_chunks" os.makedirs(temp_chunks_dir, exist_ok=True) log(f"Created/verified temp directory: {temp_chunks_dir}") - + log(f"S3 source path: {s3_source_path}") log(f"Local destination: {local_dest_path}") - + # Download chunk from S3 using S3Ferry service response = s3_ferry_service.transfer_file( destination_file_path=local_dest_path, destination_storage_type="FS", source_file_path=s3_source_path, - source_storage_type="S3" + source_storage_type="S3", ) - + log(f"S3Ferry response status: {response.status_code}") log(f"S3Ferry response body: {response.text}") - + if response.status_code in [200, 201]: # Read the downloaded chunk file local_file_path = f"/app/{local_dest_path}" - + if os.path.exists(local_file_path): log(f"Successfully downloaded chunk to: {local_file_path}") - + # Read and parse the chunk data - with open(local_file_path, 'r', encoding='utf-8') as f: + with open(local_file_path, "r", encoding="utf-8") as f: chunk_data = json.load(f) - + # Clean up the downloaded file os.remove(local_file_path) log(f"Cleaned up downloaded file: {local_file_path}") - + # Remove empty directory if it exists try: os.rmdir(os.path.dirname(local_file_path)) except OSError: pass # Directory not empty or doesn't exist - + return { "success": True, "dataset_id": dataset_id, "page_num": page_num, "chunk_data": chunk_data, - "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}" + "message": f"Successfully downloaded chunk {page_num} for dataset {dataset_id}", } else: return { @@ -113,7 +118,7 @@ def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: "dataset_id": dataset_id, "page_num": page_num, "error": f"Downloaded file not found at: {local_file_path}", - "message": "File download completed but file not accessible" + 
"message": "File download completed but file not accessible", } else: return { @@ -122,9 +127,9 @@ def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: "page_num": page_num, "error": f"S3 download failed: HTTP {response.status_code}", "response_body": response.text, - "message": f"Failed to download chunk {page_num} from S3" + "message": f"Failed to download chunk {page_num} from S3", } - + except Exception as e: log(f"Error during chunk download: {str(e)}") traceback.print_exc() @@ -133,57 +138,63 @@ def download_chunk_from_s3(dataset_id: str, page_num: int) -> dict: "dataset_id": dataset_id, "page_num": page_num, "error": str(e), - "message": "Internal error during chunk download" + "message": "Internal error during chunk download", } + def main(): """Main function to handle chunk download process.""" - parser = argparse.ArgumentParser(description='Download a specific chunk from S3') - parser.add_argument('--dataset-id', required=True, help='Dataset ID') - parser.add_argument('--page-num', required=True, type=int, help='Page number (chunk number)') - parser.add_argument('--output-json', help='Output file path for results JSON') - + parser = argparse.ArgumentParser(description="Download a specific chunk from S3") + parser.add_argument("--dataset-id", required=True, help="Dataset ID") + parser.add_argument( + "--page-num", required=True, type=int, help="Page number (chunk number)" + ) + parser.add_argument("--output-json", help="Output file path for results JSON") + args = parser.parse_args() - + try: - log(f"Processing chunk download request - Dataset: {args.dataset_id}, Page: {args.page_num}") - + log( + f"Processing chunk download request - Dataset: {args.dataset_id}, Page: {args.page_num}" + ) + # Download the chunk result = download_chunk_from_s3(args.dataset_id, args.page_num) - + # Output results if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(result, f, indent=2) log(f"Results written to {args.output_json}") else: # Output ONLY the JSON to stdout (this goes to CronManager) print(json.dumps(result)) - + log(f"Chunk download completed - Success: {result['success']}") - + # Exit with appropriate code - sys.exit(0 if result['success'] else 1) - + sys.exit(0 if result["success"] else 1) + except Exception as e: log(f"Internal error: {str(e)}") traceback.print_exc() - + error_result = { "success": False, "dataset_id": args.dataset_id, "page_num": args.page_num, "error": str(e), - "message": "Script execution failed" + "message": "Script execution failed", } - + if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(error_result, f, indent=2) else: print(json.dumps(error_result)) - + sys.exit(1) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/s3_dataset_processor/fetch_multi_chunk.py b/src/s3_dataset_processor/fetch_multi_chunk.py index 18cf8228..8ad5f24a 100644 --- a/src/s3_dataset_processor/fetch_multi_chunk.py +++ b/src/s3_dataset_processor/fetch_multi_chunk.py @@ -3,12 +3,12 @@ Python script to download multiple chunks from S3 bucket, aggregate them, and return as JSON. Used by CronManager endpoint to fetch and combine multiple chunks. 
""" + import sys import json import argparse import logging import os -import tempfile from pathlib import Path import traceback from typing import List, Dict, Any @@ -16,91 +16,95 @@ # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stderr)] # Log to stderr to keep stdout clean + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stderr)], # Log to stderr to keep stdout clean ) logger = logging.getLogger(__name__) # Add the s3_dataset_processor to Python path to import modules -script_dir = Path('/app/src/s3_dataset_processor') +script_dir = Path("/app/src/s3_dataset_processor") sys.path.insert(0, str(script_dir)) + def log(message): """Log to stderr to keep stdout clean for JSON output""" logger.info(f"📦 [MULTI CHUNK] {message}") + try: from services.s3_ferry_service import S3Ferry + s3_ferry_service = S3Ferry() log("Successfully imported S3FerryService") except ImportError as e: log(f"Failed to import S3FerryService: {e}") sys.exit(1) + def download_single_chunk_from_s3(dataset_id: str, chunk_id: int) -> Dict[str, Any]: """ Download a single chunk from S3 bucket. - + Args: dataset_id: Dataset ID chunk_id: Chunk ID/number - + Returns: Dictionary containing chunk data or error information """ try: log(f"Downloading chunk {chunk_id} from dataset {dataset_id}") - + # Define S3 source path and local destination chunk_filename = f"{chunk_id}.json" s3_source_path = f"{dataset_id}/{chunk_filename}" local_dest_path = f"temp_chunks/{chunk_filename}" - + # Create the temp_chunks directory if it doesn't exist temp_chunks_dir = "/app/temp_chunks" os.makedirs(temp_chunks_dir, exist_ok=True) - + log(f"S3 source path: {s3_source_path}") log(f"Local destination: {local_dest_path}") - + # Download chunk from S3 using S3Ferry service response = s3_ferry_service.transfer_file( destination_file_path=local_dest_path, destination_storage_type="FS", source_file_path=s3_source_path, - source_storage_type="S3" + source_storage_type="S3", ) - + log(f"S3Ferry response status for chunk {chunk_id}: {response.status_code}") - + if response.status_code in [200, 201]: # Read the downloaded chunk file local_file_path = f"/app/{local_dest_path}" - + if os.path.exists(local_file_path): log(f"Successfully downloaded chunk {chunk_id} to: {local_file_path}") - + # Read and parse the chunk data - with open(local_file_path, 'r', encoding='utf-8') as f: + with open(local_file_path, "r", encoding="utf-8") as f: chunk_data = json.load(f) - + # Clean up the downloaded file os.remove(local_file_path) log(f"Cleaned up downloaded file: {local_file_path}") - + return { "success": True, "chunk_id": chunk_id, "chunk_data": chunk_data, - "message": f"Successfully downloaded chunk {chunk_id}" + "message": f"Successfully downloaded chunk {chunk_id}", } else: return { "success": False, "chunk_id": chunk_id, "error": f"Downloaded file not found at: {local_file_path}", - "message": f"Chunk {chunk_id} download completed but file not accessible" + "message": f"Chunk {chunk_id} download completed but file not accessible", } else: return { @@ -108,9 +112,9 @@ def download_single_chunk_from_s3(dataset_id: str, chunk_id: int) -> Dict[str, A "chunk_id": chunk_id, "error": f"S3 download failed: HTTP {response.status_code}", "response_body": response.text, - "message": f"Failed to download chunk {chunk_id} from S3" + "message": f"Failed to download 
chunk {chunk_id} from S3", } - + except Exception as e: log(f"Error downloading chunk {chunk_id}: {str(e)}") traceback.print_exc() @@ -118,56 +122,67 @@ def download_single_chunk_from_s3(dataset_id: str, chunk_id: int) -> Dict[str, A "success": False, "chunk_id": chunk_id, "error": str(e), - "message": f"Internal error during chunk {chunk_id} download" + "message": f"Internal error during chunk {chunk_id} download", } + def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, Any]: """ Download multiple chunks from S3 and aggregate them. - + Args: dataset_id: Dataset ID chunk_ids: List of chunk IDs to download - + Returns: Dictionary containing aggregated chunk data or error information """ try: - log(f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}") - + log( + f"Starting multi-chunk download - Dataset ID: {dataset_id}, Chunks: {chunk_ids}" + ) + download_results = [] successful_chunks = [] failed_chunks = [] aggregated_data = [] total_items = 0 - + # Download each chunk for chunk_id in chunk_ids: result = download_single_chunk_from_s3(dataset_id, chunk_id) download_results.append(result) - + if result["success"]: successful_chunks.append(chunk_id) chunk_data = result["chunk_data"] - + # Extract data array from chunk chunk_items = chunk_data.get("data", []) aggregated_data.extend(chunk_items) total_items += len(chunk_items) - - log(f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation") + + log( + f"✅ Chunk {chunk_id}: {len(chunk_items)} items added to aggregation" + ) else: failed_chunks.append(chunk_id) - log(f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}") - + log( + f"❌ Chunk {chunk_id}: Download failed - {result.get('error', 'Unknown error')}" + ) + # Prepare chunk info from the first successful chunk (if any) chunk_info = {} if successful_chunks and download_results: first_successful = next((r for r in download_results if r["success"]), None) if first_successful: - original_chunk_info = first_successful["chunk_data"].get("chunk_info", {}) + original_chunk_info = first_successful["chunk_data"].get( + "chunk_info", {} + ) chunk_info = { - "original_dataset": original_chunk_info.get("original_dataset", dataset_id), + "original_dataset": original_chunk_info.get( + "original_dataset", dataset_id + ), "requested_chunks": chunk_ids, "successful_chunks": successful_chunks, "failed_chunks": failed_chunks, @@ -175,9 +190,11 @@ def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, "successful_downloads": len(successful_chunks), "failed_downloads": len(failed_chunks), "total_aggregated_items": total_items, - "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" if successful_chunks else "none" + "aggregation_range": f"chunks {min(successful_chunks)}-{max(successful_chunks)}" + if successful_chunks + else "none", } - + # Prepare the final aggregated payload if successful_chunks: aggregated_payload = { @@ -191,10 +208,10 @@ def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, "failed_downloads": len(failed_chunks), "successful_chunk_ids": successful_chunks, "failed_chunk_ids": failed_chunks, - "total_items_aggregated": total_items + "total_items_aggregated": total_items, }, "download_details": download_results, - "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested chunks" + "message": f"Successfully aggregated {len(successful_chunks)} out of {len(chunk_ids)} requested 
chunks", } else: aggregated_payload = { @@ -208,18 +225,20 @@ def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, "failed_downloads": len(failed_chunks), "successful_chunk_ids": [], "failed_chunk_ids": failed_chunks, - "total_items_aggregated": 0 + "total_items_aggregated": 0, }, "download_details": download_results, "error": "All chunk downloads failed", - "message": f"Failed to download any of the {len(chunk_ids)} requested chunks" + "message": f"Failed to download any of the {len(chunk_ids)} requested chunks", } - - log(f"Multi-chunk aggregation completed - Success: {aggregated_payload['success']}") + + log( + f"Multi-chunk aggregation completed - Success: {aggregated_payload['success']}" + ) log(f"Total items aggregated: {total_items}") - + return aggregated_payload - + except Exception as e: log(f"Error during multi-chunk aggregation: {str(e)}") traceback.print_exc() @@ -234,85 +253,100 @@ def download_multiple_chunks(dataset_id: str, chunk_ids: List[int]) -> Dict[str, "failed_downloads": len(chunk_ids), "successful_chunk_ids": [], "failed_chunk_ids": chunk_ids, - "total_items_aggregated": 0 + "total_items_aggregated": 0, }, "error": str(e), - "message": "Internal error during multi-chunk aggregation" + "message": "Internal error during multi-chunk aggregation", } + def parse_chunk_ids(chunk_ids_str: str) -> List[int]: """ Parse chunk IDs from string format "1 2 3" to list [1, 2, 3]. - + Args: chunk_ids_str: String containing space-separated chunk IDs - + Returns: List of integer chunk IDs """ try: # Split by spaces and convert to integers - chunk_ids = [int(chunk_id.strip()) for chunk_id in chunk_ids_str.split() if chunk_id.strip()] + chunk_ids = [ + int(chunk_id.strip()) + for chunk_id in chunk_ids_str.split() + if chunk_id.strip() + ] log(f"Parsed chunk IDs: {chunk_ids}") return chunk_ids except ValueError as e: log(f"Error parsing chunk IDs '{chunk_ids_str}': {str(e)}") - raise ValueError(f"Invalid chunk IDs format. Expected space-separated integers, got: '{chunk_ids_str}'") + raise ValueError( + f"Invalid chunk IDs format. 
Expected space-separated integers, got: '{chunk_ids_str}'" + ) + def main(): """Main function to handle multi-chunk download and aggregation process.""" - parser = argparse.ArgumentParser(description='Download and aggregate multiple chunks from S3') - parser.add_argument('--dataset-id', required=True, help='Dataset ID') - parser.add_argument('--chunk-ids', required=True, help='Space-separated chunk IDs (e.g., "1 2 3")') - parser.add_argument('--output-json', help='Output file path for results JSON') - + parser = argparse.ArgumentParser( + description="Download and aggregate multiple chunks from S3" + ) + parser.add_argument("--dataset-id", required=True, help="Dataset ID") + parser.add_argument( + "--chunk-ids", required=True, help='Space-separated chunk IDs (e.g., "1 2 3")' + ) + parser.add_argument("--output-json", help="Output file path for results JSON") + args = parser.parse_args() - + try: - log(f"Processing multi-chunk request - Dataset: {args.dataset_id}, Chunk IDs: {args.chunk_ids}") - + log( + f"Processing multi-chunk request - Dataset: {args.dataset_id}, Chunk IDs: {args.chunk_ids}" + ) + # Parse chunk IDs chunk_ids = parse_chunk_ids(args.chunk_ids) - + if not chunk_ids: raise ValueError("No valid chunk IDs provided") - + # Download and aggregate chunks result = download_multiple_chunks(args.dataset_id, chunk_ids) - + # Output results if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(result, f, indent=2) log(f"Results written to {args.output_json}") else: # Output ONLY the JSON to stdout (this goes to CronManager) print(json.dumps(result)) - + log(f"Multi-chunk processing completed - Success: {result['success']}") - + # Exit with appropriate code - sys.exit(0 if result['success'] else 1) - + sys.exit(0 if result["success"] else 1) + except Exception as e: log(f"Internal error: {str(e)}") traceback.print_exc() - + error_result = { "success": False, "dataset_id": args.dataset_id, "chunk_ids": args.chunk_ids, "error": str(e), - "message": "Script execution failed" + "message": "Script execution failed", } - + if args.output_json: - with open(args.output_json, 'w') as f: + with open(args.output_json, "w") as f: json.dump(error_result, f, indent=2) else: print(json.dumps(error_result)) - + sys.exit(1) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/s3_dataset_processor/models/schemas.py b/src/s3_dataset_processor/models/schemas.py index c31722ce..a3e2f1df 100644 --- a/src/s3_dataset_processor/models/schemas.py +++ b/src/s3_dataset_processor/models/schemas.py @@ -35,20 +35,24 @@ class DownloadResponse(BaseModel): extracted_folders: List[Dict[str, str]] total_extracted_folders: int + class ChunkDownloadRequest(BaseModel): """Request model for downloading a single chunk.""" + dataset_id: str page_num: int class MultiChunkDownloadRequest(BaseModel): """Request model for downloading multiple chunks.""" + dataset_id: str chunk_ids: List[int] class ChunkDownloadResponse(BaseModel): """Response model for single chunk download.""" + success: bool dataset_id: str page_num: Optional[int] = None @@ -59,6 +63,7 @@ class ChunkDownloadResponse(BaseModel): class MultiChunkDownloadResponse(BaseModel): """Response model for multi-chunk download and aggregation.""" + success: bool dataset_id: str chunk_info: Optional[Dict[str, Any]] = None diff --git a/src/s3_dataset_processor/services/download_service.py b/src/s3_dataset_processor/services/download_service.py index 1ca27964..146eaeac 100644 --- 
a/src/s3_dataset_processor/services/download_service.py +++ b/src/s3_dataset_processor/services/download_service.py @@ -8,12 +8,13 @@ from models.schemas import DownloadedFile import sys import logging + # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stdout)] + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger(__name__) @@ -88,7 +89,9 @@ def process_downloads(self, decoded_data: List[Dict[str, Any]]) -> tuple: # Download file to data directory local_file_path = os.path.join(self.data_dir, original_filename) - logger.info(f"Downloading {original_filename} for agency {agency_id} with name {agency_name}") + logger.info( + f"Downloading {original_filename} for agency {agency_id} with name {agency_name}" + ) if self.download_file(signed_url, local_file_path): file_size = os.path.getsize(local_file_path) diff --git a/src/s3_dataset_processor/services/extraction_service.py b/src/s3_dataset_processor/services/extraction_service.py index a81318de..8b937234 100644 --- a/src/s3_dataset_processor/services/extraction_service.py +++ b/src/s3_dataset_processor/services/extraction_service.py @@ -9,12 +9,13 @@ from models.schemas import DownloadedFile import sys import logging + # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s | %(levelname)s | %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - handlers=[logging.StreamHandler(sys.stdout)] + format="%(asctime)s | %(levelname)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger(__name__) @@ -91,7 +92,11 @@ def process_extractions( # Add to extracted folders list extracted_folders.append( - {"agency_id": downloaded_file.agency_id, "agency_name": downloaded_file.agency_name, "folder_path": agency_dir} + { + "agency_id": downloaded_file.agency_id, + "agency_name": downloaded_file.agency_name, + "folder_path": agency_dir, + } ) # Remove the ZIP file after successful extraction diff --git a/src/s3_dataset_processor/services/s3_ferry_service.py b/src/s3_dataset_processor/services/s3_ferry_service.py index a7e3d2fc..0c0cfafe 100644 --- a/src/s3_dataset_processor/services/s3_ferry_service.py +++ b/src/s3_dataset_processor/services/s3_ferry_service.py @@ -3,18 +3,19 @@ import requests import logging import traceback -from typing import Dict, Any +from typing import Dict # Configure logging logger = logging.getLogger(__name__) + class S3Ferry: """Service class for handling S3Ferry file transfer operations.""" - + def __init__(self, base_url: str = "http://gc-s3-ferry:3000"): """ Initialize the S3Ferry service. - + Args: base_url: Base URL for the S3Ferry service """ @@ -22,64 +23,78 @@ def __init__(self, base_url: str = "http://gc-s3-ferry:3000"): self.url = f"{base_url}/v1/files/copy" logger.info(f"S3Ferry service initialized with URL: {self.url}") - def transfer_file(self, destination_file_path: str, destination_storage_type: str, - source_file_path: str, source_storage_type: str) -> requests.Response: + def transfer_file( + self, + destination_file_path: str, + destination_storage_type: str, + source_file_path: str, + source_storage_type: str, + ) -> requests.Response: """ Transfer a file using S3Ferry service. 
- + Args: destination_file_path: Path where the file should be stored in destination destination_storage_type: Type of destination storage (e.g., 's3', 'local') source_file_path: Path of the source file source_storage_type: Type of source storage (e.g., 'local', 's3') - + Returns: Response object from the S3Ferry service """ try: payload = self.get_s3_ferry_payload( - destination_file_path, - destination_storage_type, - source_file_path, - source_storage_type + destination_file_path, + destination_storage_type, + source_file_path, + source_storage_type, + ) + + logger.info( + f"[S3_FERRY] Transferring file: {source_file_path} -> {destination_file_path}" ) - - logger.info(f"[S3_FERRY] Transferring file: {source_file_path} -> {destination_file_path}") logger.debug(f"[S3_FERRY] Payload: {payload}") - + response = requests.post( - self.url, + self.url, json=payload, headers={"Content-Type": "application/json"}, - timeout=60 + timeout=60, ) - + logger.info(f"[S3_FERRY] Transfer response status: {response.status_code}") - + # Accept both 200 (OK) and 201 (Created) as success if response.status_code not in [200, 201]: logger.error(f"[S3_FERRY] Transfer failed: {response.text}") else: - logger.info(f"[S3_FERRY] ✅ Transfer successful (HTTP {response.status_code})") - + logger.info( + f"[S3_FERRY] ✅ Transfer successful (HTTP {response.status_code})" + ) + return response - + except Exception as e: logger.error(f"[S3_FERRY] Error during file transfer: {str(e)}") traceback.print_exc() raise - def get_s3_ferry_payload(self, destination_file_path: str, destination_storage_type: str, - source_file_path: str, source_storage_type: str) -> Dict[str, str]: + def get_s3_ferry_payload( + self, + destination_file_path: str, + destination_storage_type: str, + source_file_path: str, + source_storage_type: str, + ) -> Dict[str, str]: """ Generate S3Ferry payload for file transfer. - + Args: destination_file_path: Path where the file should be stored in destination destination_storage_type: Type of destination storage source_file_path: Path of the source file source_storage_type: Type of source storage - + Returns: Dictionary containing the S3Ferry payload """ @@ -87,19 +102,21 @@ def get_s3_ferry_payload(self, destination_file_path: str, destination_storage_t "destinationFilePath": destination_file_path, "destinationStorageType": destination_storage_type, "sourceFilePath": source_file_path, - "sourceStorageType": source_storage_type + "sourceStorageType": source_storage_type, } - + return payload - def upload_to_s3(self, local_file_path: str, s3_destination_path: str) -> requests.Response: + def upload_to_s3( + self, local_file_path: str, s3_destination_path: str + ) -> requests.Response: """ Convenience method to upload a local file to S3. - + Args: local_file_path: Path to the local file s3_destination_path: S3 destination path (e.g., 'bucket/folder/file.json') - + Returns: Response object from the S3Ferry service """ @@ -107,17 +124,19 @@ def upload_to_s3(self, local_file_path: str, s3_destination_path: str) -> reques destination_file_path=s3_destination_path, destination_storage_type="S3", source_file_path=local_file_path, - source_storage_type="FS" + source_storage_type="FS", ) - def download_from_s3(self, s3_source_path: str, local_destination_path: str) -> requests.Response: + def download_from_s3( + self, s3_source_path: str, local_destination_path: str + ) -> requests.Response: """ Convenience method to download a file from S3 to local storage. 
- + Args: s3_source_path: S3 source path (e.g., 'bucket/folder/file.json') local_destination_path: Local destination path - + Returns: Response object from the S3Ferry service """ @@ -125,17 +144,17 @@ def download_from_s3(self, s3_source_path: str, local_destination_path: str) -> destination_file_path=local_destination_path, destination_storage_type="local", source_file_path=s3_source_path, - source_storage_type="s3" + source_storage_type="s3", ) # def copy_s3_to_s3(self, source_s3_path: str, destination_s3_path: str) -> requests.Response: # """ # Convenience method to copy files between S3 locations. - + # Args: # source_s3_path: Source S3 path # destination_s3_path: Destination S3 path - + # Returns: # Response object from the S3Ferry service # """ @@ -144,4 +163,4 @@ def download_from_s3(self, s3_source_path: str, local_destination_path: str) -> # destination_storage_type="s3", # source_file_path=source_s3_path, # source_storage_type="s3" - # ) \ No newline at end of file + # ) From e5d3a087ee959390fb02a08b6605ee772ff9d40a Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Wed, 2 Jul 2025 16:06:10 +0530 Subject: [PATCH 068/195] feat: update data model configuration and validation, add new SQL scripts --- DSL/Liquibase/master.yml | 4 + GUI/src/App.tsx | 3 + .../pages/DataModels/ConfigureDataModel.tsx | 202 +++--------------- GUI/src/types/dataModels.ts | 1 - GUI/src/utils/dataModelsUtils.ts | 60 ++++++ docker-compose.yml | 2 +- 6 files changed, 94 insertions(+), 178 deletions(-) create mode 100644 GUI/src/utils/dataModelsUtils.ts diff --git a/DSL/Liquibase/master.yml b/DSL/Liquibase/master.yml index ebb54eed..a5c8bd42 100644 --- a/DSL/Liquibase/master.yml +++ b/DSL/Liquibase/master.yml @@ -13,6 +13,10 @@ databaseChangeLog: file: changelog/global-classifier-script-v6-integrated-agencies.sql - include: file: changelog/mock-global-classifier-script-v7-centops-ckb.sql + - include: + file: changelog/global-classifier-script-v8-datasets.sql - include: file: changelog/global-classifier-script-v9-data-models.sql + - include: + file: changelog/global-classifier-script-v10-datasets-metadata.sql diff --git a/GUI/src/App.tsx b/GUI/src/App.tsx index a5b40b86..5f149696 100644 --- a/GUI/src/App.tsx +++ b/GUI/src/App.tsx @@ -14,6 +14,7 @@ import Datasets from 'pages/Datasets'; import ViewDataset from 'pages/ViewDataset'; import DataModels from 'pages/DataModels'; import CreateDataModel from 'pages/DataModels/CreateDataModel'; +import ConfigureDataModel from 'pages/DataModels/ConfigureDataModel'; const App: FC = () => { const navigate = useNavigate(); @@ -68,6 +69,8 @@ const App: FC = () => { } /> } /> } /> + } /> + )} diff --git a/GUI/src/pages/DataModels/ConfigureDataModel.tsx b/GUI/src/pages/DataModels/ConfigureDataModel.tsx index 21a7b595..c74fcd04 100644 --- a/GUI/src/pages/DataModels/ConfigureDataModel.tsx +++ b/GUI/src/pages/DataModels/ConfigureDataModel.tsx @@ -4,12 +4,7 @@ import { Link, useNavigate } from 'react-router-dom'; import { Button, Card, Dialog } from 'components'; import { useDialog } from 'hooks/useDialog'; import BackArrowButton from 'assets/BackArrowButton'; -import { - deleteDataModel, - getMetadata, - retrainDataModel, - updateDataModel, -} from 'services/data-models'; + import DataModelForm from 'components/molecules/DataModelForm'; import { getChangedAttributes } from 'utils/dataModelsUtils'; import { Platform, UpdateType } from 'enums/dataModelsEnums'; @@ -35,19 +30,17 @@ const ConfigureDataModel: FC = ({ const [enabled, setEnabled] = useState(true); const [initialData, 
setInitialData] = useState>({ modelName: '', - dgId: 0, - platform: '', + datasetId: 0, baseModels: [], - maturity: '', + deploymentEnvironment: '', version: '', }); const [dataModel, setDataModel] = useState({ modelId: 0, modelName: '', - dgId: 0, - platform: '', + datasetId: 0, baseModels: [], - maturity: '', + deploymentEnvironment: '', version: '', }); const [modalOpen, setModalOpen] = useState(false); @@ -55,34 +48,7 @@ const ConfigureDataModel: FC = ({ const [modalTitle, setModalTitle] = useState(''); const [modalDiscription, setModalDiscription] = useState(''); const modalFunciton = useRef(() => { }); - const { isLoading } = useQuery( - dataModelsQueryKeys.GET_META_DATA(id), - () => getMetadata(id), - { - enabled, - onSuccess: (data) => { - setDataModel({ - modelId: data?.modelId || 0, - modelName: data?.modelName || '', - dgId: data?.connectedDgId || 0, - platform: data?.deploymentEnv || '', - baseModels: data?.baseModels || [], - maturity: data?.maturityLabel || '', - version: `V${data?.majorVersion}.${data?.minorVersion}`, - }); - setInitialData({ - modelName: data?.modelName || '', - dgId: data?.connectedDgId || 0, - platform: data?.deploymentEnv || '', - baseModels: data?.baseModels || [], - maturity: data?.maturityLabel || '', - version: `V${data?.majorVersion}.${data?.minorVersion}`, - }); - setEnabled(false); - }, - } - ); - + const handleDataModelAttributesChange = ( name: keyof DataModel, value: any @@ -96,136 +62,27 @@ const ConfigureDataModel: FC = ({ const handleSave = () => { const payload = getChangedAttributes(initialData, dataModel); let updateType: string | undefined; - if (payload.dgId) { + if (payload.datasetId) { updateType = UpdateType.MAJOR; - } else if (payload.baseModels || payload.platform) { + } else if (payload.baseModels) { updateType = UpdateType.MINOR; - } else if (payload.maturity) { - updateType = UpdateType.MATURITY_LABEL; - } + } const updatedPayload = { modelId: dataModel.modelId, - connectedDgId: payload.dgId, - deploymentEnv: payload.platform, + connectedDgId: payload.datasetId, + deploymentEnv: payload.deploymentEnvironment, baseModels: payload.baseModels, - maturityLabel: payload.maturity, updateType: updateType, }; - if (updateType) { - if (availableProdModels?.includes(dataModel.platform)) { - openModal( - t('dataModels.createDataModel.replaceDesc'), - t('dataModels.createDataModel.replaceTitle'), - () => updateDataModelMutation.mutate(updatedPayload), - 'replace' - ); - } else { - updateDataModelMutation.mutate(updatedPayload); - } - } + }; - const updateDataModelMutation = useMutation({ - mutationFn: (data: UpdatedDataModelPayload) => updateDataModel(data), - onSuccess: async () => { - open({ - title: t('dataModels.configureDataModel.saveChangesTitile'), - content:

    {t('dataModels.configureDataModel.saveChangesDesc')}

    , - footer: ( -
    - {' '} - -
    - ), - }); - }, - onError: () => { - open({ - title: t('dataModels.configureDataModel.updateErrorTitile'), - content:

    {t('dataModels.configureDataModel.updateErrorDesc')}

    , - }); - }, - }); - const handleDelete = () => { - if ( - dataModel.platform === Platform.JIRA || - dataModel.platform === Platform.OUTLOOK - ) { - open({ - title: t('dataModels.configureDataModel.deleteErrorTitle'), - content:

    {t('dataModels.configureDataModel.deleteErrorDesc')}

    , - footer: ( -
    - -
    - ), - }); - } else { - openModal( - t('dataModels.configureDataModel.deleteConfirmationDesc'), - t('dataModels.configureDataModel.deleteConfirmation'), - () => deleteDataModelMutation.mutate(dataModel.modelId), - 'delete' - ); - - } + }; - const deleteDataModelMutation = useMutation({ - mutationFn: (modelId: number) => deleteDataModel(modelId), - onSuccess: async (response) => { - close(); - navigate(0); - }, - onError: () => { - open({ - title: t('dataModels.configureDataModel.deleteModalErrorTitle'), - content: ( -

    {t('dataModels.configureDataModel.deleteModalErrorDesc')}

    - ), - }); - }, - }); - - const retrainDataModelMutation = useMutation({ - mutationFn: (modelId: number) => retrainDataModel(modelId), - onSuccess: async () => { - close(); - navigate(0); - setModalOpen(false) - }, - onError: () => { - open({ - title: t('dataModels.configureDataModel.retrainDataModalErrorTitle'), - content: ( -

    {t('dataModels.configureDataModel.retrainDataModalErrorDesc')}

    - ), - }); - }, - }); const openModal = ( content: string, @@ -259,7 +116,6 @@ const ConfigureDataModel: FC = ({

    {t('dataModels.configureDataModel.retrainCard')}

    - {isLoading ? ( + {false ? ( ) : ( = ({ > {modalType === 'retrain' ? ( ) : modalType === 'delete' ? ( ) : (
    diff --git a/GUI/src/components/molecules/DataModelCard/index.tsx b/GUI/src/components/molecules/DataModelCard/index.tsx index 0aa795ce..88de104a 100644 --- a/GUI/src/components/molecules/DataModelCard/index.tsx +++ b/GUI/src/components/molecules/DataModelCard/index.tsx @@ -8,6 +8,7 @@ import Card from 'components/Card'; import { useTranslation } from 'react-i18next'; import { TrainingResults } from 'types/dataModels'; import { formatDate } from 'utils/commonUtilts'; +import { useNavigate } from 'react-router-dom'; type DataModelCardProps = { modelId: number | string; @@ -38,7 +39,12 @@ const DataModelCard: FC> = ({ const { open, close } = useDialog(); const { t } = useTranslation(); const resultsJsonData: TrainingResults = JSON.parse(results ?? '{}'); +const navigate = useNavigate(); +const configureDataModel = () => { + navigate(`/configure-datamodel?datamodelId=${modelId}`); + +}; const renderTrainingStatus = (status: string | undefined) => { if (status === TrainingStatus.RETRAINING_NEEDED) { return ( @@ -52,10 +58,10 @@ const DataModelCard: FC> = ({ {t('dataModels.trainingStatus.trained') ?? ''} ); - } else if (status === TrainingStatus.TRAINING_INPROGRESS) { + } else if (status === TrainingStatus.TRAINING_INPROGRESS || status === TrainingStatus.INITIATING_TRAINING) { return ( ); } else if (status === TrainingStatus.FAILED) { @@ -112,6 +118,9 @@ const DataModelCard: FC> = ({
    {renderTrainingStatus(trainingStatus)} + {isLatest && } {renderMaturityLabel(maturity)}
    @@ -198,9 +207,7 @@ const DataModelCard: FC> = ({ diff --git a/GUI/src/components/molecules/DataModelForm/index.tsx b/GUI/src/components/molecules/DataModelForm/index.tsx index 63adfcad..9343003a 100644 --- a/GUI/src/components/molecules/DataModelForm/index.tsx +++ b/GUI/src/components/molecules/DataModelForm/index.tsx @@ -37,7 +37,7 @@ const DataModelForm: FC = ({ const { data: datasetVersions } = useQuery({ queryKey: dataModelsQueryKeys.DATA_MODEL_DEPLOYMENT_ENVIRONMENTS(), queryFn: () => getAllDatasetVersions(), - }); + }); return (
    @@ -98,7 +98,7 @@ const DataModelForm: FC = ({
    diff --git a/GUI/src/enums/dataModelsEnums.ts b/GUI/src/enums/dataModelsEnums.ts index 1dc34de2..e059554e 100644 --- a/GUI/src/enums/dataModelsEnums.ts +++ b/GUI/src/enums/dataModelsEnums.ts @@ -1,6 +1,7 @@ export enum TrainingStatus { NOT_TRAINED = 'not_trained', TRAINING_INPROGRESS = 'training_in_progress', + INITIATING_TRAINING = 'initiating_training', TRAINED = 'trained', RETRAINING_NEEDED = 'retraining_needed', FAILED = 'training_failed', diff --git a/GUI/src/pages/DataModels/ConfigureDataModel.tsx b/GUI/src/pages/DataModels/ConfigureDataModel.tsx index c74fcd04..d19eb4d1 100644 --- a/GUI/src/pages/DataModels/ConfigureDataModel.tsx +++ b/GUI/src/pages/DataModels/ConfigureDataModel.tsx @@ -1,6 +1,6 @@ -import { FC, useRef, useState } from 'react'; +import { FC, useEffect, useRef, useState } from 'react'; import { useMutation, useQuery } from '@tanstack/react-query'; -import { Link, useNavigate } from 'react-router-dom'; +import { Link, useNavigate, useSearchParams } from 'react-router-dom'; import { Button, Card, Dialog } from 'components'; import { useDialog } from 'hooks/useDialog'; import BackArrowButton from 'assets/BackArrowButton'; @@ -14,41 +14,65 @@ import { DataModel, UpdatedDataModelPayload } from 'types/dataModels'; import { dataModelsQueryKeys } from 'utils/queryKeys'; import { useTranslation } from 'react-i18next'; import './DataModels.scss'; +import { configureDataModel, getDataModelMetadata } from 'services/datamodels'; +import { use } from 'i18next'; +import { set } from 'date-fns'; -type ConfigureDataModelType = { - id: number; - availableProdModels?: string[]; -}; - -const ConfigureDataModel: FC = ({ - id, - availableProdModels, -}) => { +const ConfigureDataModel: FC = () => { const { t } = useTranslation(); const { open, close } = useDialog(); const navigate = useNavigate(); const [enabled, setEnabled] = useState(true); - const [initialData, setInitialData] = useState>({ - modelName: '', - datasetId: 0, - baseModels: [], - deploymentEnvironment: '', - version: '', - }); - const [dataModel, setDataModel] = useState({ - modelId: 0, - modelName: '', - datasetId: 0, - baseModels: [], - deploymentEnvironment: '', - version: '', - }); + const [modalOpen, setModalOpen] = useState(false); const [modalType, setModalType] = useState(''); const [modalTitle, setModalTitle] = useState(''); const [modalDiscription, setModalDiscription] = useState(''); const modalFunciton = useRef(() => { }); - + const [searchParams] = useSearchParams(); + const modelId = searchParams.get('datamodelId'); + const { data: modelMetadata } = useQuery({ + queryKey: dataModelsQueryKeys.GET_META_DATA(modelId ?? ''), + queryFn: () => getDataModelMetadata(modelId ?? ''), + }); + + const [initialData, setInitialData] = useState>({ + modelName: modelMetadata?.modelName, + datasetId: modelMetadata?.connectedDsId, + baseModels:modelMetadata?.baseModels, + deploymentEnvironment: modelMetadata?.deploymentEnv, + version: `V${modelMetadata?.major}.${modelMetadata?.minor}`, + }); + + const [dataModel, setDataModel] = useState({ + modelId: modelMetadata?.modelId, + modelName: modelMetadata?.modelName, + datasetId: modelMetadata?.connectedDsId.toString(), + baseModels: modelMetadata ? 
JSON.parse(modelMetadata?.baseModels.value) : [], + deploymentEnvironment: modelMetadata?.deploymentEnv, + version: `V${modelMetadata?.major}.${modelMetadata?.minor}`, + }); + + useEffect(() => { + setInitialData({ + modelId: modelMetadata?.modelId, + modelName: modelMetadata?.modelName, + datasetId: modelMetadata?.connectedDsId.toString(), + baseModels: modelMetadata ? JSON.parse(modelMetadata?.baseModels.value) : [], + deploymentEnvironment: modelMetadata?.deploymentEnv, + version: `V${modelMetadata?.major}.${modelMetadata?.minor}`, + }); + setDataModel({ + modelId: modelMetadata?.modelId, + modelName: modelMetadata?.modelName, + datasetId: modelMetadata?.connectedDsId.toString(), + baseModels: modelMetadata ? JSON.parse(modelMetadata?.baseModels.value) : [], + deploymentEnvironment: modelMetadata?.deploymentEnv, + version: `V${modelMetadata?.major}.${modelMetadata?.minor}`, + }); + }, [modelMetadata]); + + const handleDataModelAttributesChange = ( name: keyof DataModel, value: any @@ -59,28 +83,50 @@ const ConfigureDataModel: FC = ({ })); }; - const handleSave = () => { + const mutation = useMutation({ + mutationFn: configureDataModel, + onSuccess: () => { + open({ + title: t('dataModels.configureDataModel.saveChangesTitile'), + content: t('dataModels.configureDataModel.saveChangesDesc'), + footer: (
    ) + }); + + }, + onError: () => { + open({ + title: t('dataModels.configureDataModel.updateErrorTitile'), + content: t('dataModels.configureDataModel.updateErrorDesc'), + }); + }, + }); + + const handleSaveChanges = () => { const payload = getChangedAttributes(initialData, dataModel); let updateType: string | undefined; if (payload.datasetId) { updateType = UpdateType.MAJOR; } else if (payload.baseModels) { updateType = UpdateType.MINOR; - } + } const updatedPayload = { - modelId: dataModel.modelId, - connectedDgId: payload.datasetId, - deploymentEnv: payload.deploymentEnvironment, - baseModels: payload.baseModels, - updateType: updateType, + modelGroupKey: modelMetadata.modelGroupKey ?? "", + modelName: dataModel.modelName ?? "", + connectedDsId: Number(dataModel.datasetId) ?? 0, + deploymentEnv: dataModel.deploymentEnvironment ?? "", + baseModels: dataModel.baseModels ?? [], + connectedDsMajorVersion: Number(dataModel.version?.split('.')[0]?.[1]) ?? 0, + connectedDsMinorVersion: Number(dataModel.version?.split('.')[1]) ?? 0, + updateType: updateType ?? "", }; - + mutation.mutate(updatedPayload); + }; const handleDelete = () => { - + }; @@ -100,7 +146,7 @@ const ConfigureDataModel: FC = ({
    - navigate(0)}> +
    @@ -108,7 +154,7 @@ const ConfigureDataModel: FC = ({
    - + {/*
    @@ -122,13 +168,15 @@ const ConfigureDataModel: FC = ({
    - + */} {false ? ( ) : ( @@ -146,8 +194,8 @@ const ConfigureDataModel: FC = ({ {t('dataModels.configureDataModel.deleteModal')} diff --git a/GUI/src/pages/DataModels/CreateDataModel.tsx b/GUI/src/pages/DataModels/CreateDataModel.tsx index 9f79c6eb..18a7e935 100644 --- a/GUI/src/pages/DataModels/CreateDataModel.tsx +++ b/GUI/src/pages/DataModels/CreateDataModel.tsx @@ -14,6 +14,7 @@ import { ErrorsType, } from 'types/dataModels'; import { da } from 'date-fns/locale'; +import { createDataModel } from 'services/datamodels'; const CreateDataModel: FC = () => { const { t } = useTranslation(); @@ -62,20 +63,46 @@ const CreateDataModel: FC = () => { deploymentEnvironment: '', }); + const mutation = useMutation({ + mutationFn: createDataModel, + onSuccess: () => { + open({ + title: t('dataModels.createDataModel.successTitle'), + content: t('dataModels.createDataModel.successDesc'), + footer: (
) + }); + + }, + onError: () => { + open({ + title: t('dataModels.createDataModel.errorTitle'), + content: t('dataModels.createDataModel.errorDesc'), + }); + }, + }); + const handleCreate = () => { - console.log(dataModel); - + + const payload = { + modelName: dataModel.modelName ?? "", + deploymentEnv: dataModel.deploymentEnvironment ?? "", + baseModels: dataModel.baseModels ?? [], + connectedDsId: Number(dataModel.datasetId) ?? 0, + connectedDsMajorVersion: Number(dataModel?.version?.split('.')[0]?.[1]) ?? "", + connectedDsMinorVersion: Number(dataModel?.version?.split('.')[1]) ?? "", + } + mutation.mutate(payload); + }; + + const isCreateDisabled = () => { + return ( + !dataModel.modelName || + !dataModel.datasetId || + !dataModel.baseModels || + (Array.isArray(dataModel.baseModels) && dataModel.baseModels.length === 0) || + !dataModel.deploymentEnvironment + ); + }; - -const isCreateDisabled = () => { - return ( - !dataModel.modelName || - !dataModel.datasetId || - !dataModel.baseModels || - (Array.isArray(dataModel.baseModels) && dataModel.baseModels.length === 0) || - !dataModel.deploymentEnvironment - ); -}; return (
    diff --git a/GUI/src/pages/DataModels/index.tsx b/GUI/src/pages/DataModels/index.tsx index da600615..8cd24f03 100644 --- a/GUI/src/pages/DataModels/index.tsx +++ b/GUI/src/pages/DataModels/index.tsx @@ -186,8 +186,8 @@ const DataModels: FC = () => { modelId={model?.modelId} dataModelName={model?.modelName} version={`V${model?.major}.${model?.minor}`} - // isLatest={model.latest} - datasetVersion={model?.datasetVersion} + isLatest={model.latest} + datasetVersion={`V${model?.connectedDsMajorVersion}.${model?.connectedDsMinorVersion}`} lastTrained={model?.lastTrained} trainingStatus={model.trainingStatus} modelStatus={model?.modelStatus} diff --git a/GUI/src/services/datamodels.ts b/GUI/src/services/datamodels.ts index 67b3c850..0d996d9a 100644 --- a/GUI/src/services/datamodels.ts +++ b/GUI/src/services/datamodels.ts @@ -25,4 +25,47 @@ export async function getDataModelsOverview( export async function getDeploymentEnvironments() { const { data } = await apiDev.get(dataModelsEndpoints.GET_DEPLOYMENT_ENVIRONMENTS()); return data?.response?? []; +} + +export async function getDataModelMetadata( + modelId: number | string, +) { + const { data } = await apiDev.get(dataModelsEndpoints.GET_MODEL_METADATA(), { + params: { + modelId + }, + }); + return data?.response?.[0]?? []; +} + +export async function createDataModel(payload: { + modelName: string; + deploymentEnv: string; + baseModels: string[]; + connectedDsId: string | number; + connectedDsMajorVersion: string | number; + connectedDsMinorVersion: string | number; +}) { + const { data } = await apiDev.post(dataModelsEndpoints.CREATE_MODEL(), payload); + return data?.response ?? {}; +} + +export async function configureDataModel(payload: { + modelGroupKey: string; + modelName: string; + deploymentEnv: string; + baseModels: string[]; + connectedDsId: string | number; + connectedDsMajorVersion: string | number; + connectedDsMinorVersion: string | number; + updateType: string; +}, ) { + let endpoint =""; + if (payload.updateType === 'major') { + endpoint = dataModelsEndpoints.CREATE_MAJOR_VERSION(); + } else if (payload.updateType === 'minor') { + endpoint = dataModelsEndpoints.CREATE_MINOR_VERSION(); + } + const { data } = await apiDev.post(endpoint, payload); + return data?.response ?? 
{};
 }
\ No newline at end of file
diff --git a/GUI/src/types/dataModels.ts b/GUI/src/types/dataModels.ts
index 384798a2..2c4aacbd 100644
--- a/GUI/src/types/dataModels.ts
+++ b/GUI/src/types/dataModels.ts
@@ -57,7 +57,8 @@ export type DataModelResponse = {
   major: number;
   minor: number;
   latest: boolean;
-  datasetVersion?: string;
+  connectedDsMajorVersion?: string;
+  connectedDsMinorVersion?: string;
   dataModelName: string;
   lastTrained: string;
   trainingStatus: string;
diff --git a/GUI/src/utils/endpoints.ts b/GUI/src/utils/endpoints.ts
index 0eaa5cb5..ec850896 100644
--- a/GUI/src/utils/endpoints.ts
+++ b/GUI/src/utils/endpoints.ts
@@ -49,7 +49,12 @@ export const authEndpoints = {
 export const dataModelsEndpoints = {
   GET_OVERVIEW: (): string => '/global-classifier/datamodels/list',
+  GET_MODEL_METADATA: (): string => '/global-classifier/datamodels/metadata',
   GET_DEPLOYMENT_ENVIRONMENTS: (): string => '/global-classifier/datamodels/configs/environments',
+  CREATE_MODEL: (): string => '/global-classifier/datamodels/create',
+  CREATE_MAJOR_VERSION: (): string => '/global-classifier/datamodels/major',
+  CREATE_MINOR_VERSION: (): string => '/global-classifier/datamodels/minor',
+
   GET_DATAMODELS_FILTERS: (): string =>
diff --git a/GUI/src/utils/queryKeys.ts b/GUI/src/utils/queryKeys.ts
index c4ffcc04..71240af3 100644
--- a/GUI/src/utils/queryKeys.ts
+++ b/GUI/src/utils/queryKeys.ts
@@ -81,7 +81,7 @@ export const dataModelsQueryKeys = {
       sort
     ].filter((val) => val !== undefined);
   },
-  GET_META_DATA: function (modelId?: number) {
+  GET_META_DATA: function (modelId?: number | string) {
     return ['datamodels/metadata', `${modelId}`].filter(
       (val) => val !== undefined
     );
diff --git a/GUI/translations/en/common.json b/GUI/translations/en/common.json
index 09e70c92..ad5d2ffa 100644
--- a/GUI/translations/en/common.json
+++ b/GUI/translations/en/common.json
@@ -345,7 +345,7 @@
   "trainingStatus": {
     "retrainingNeeded": "Retraining Needed",
     "trained": "Trained",
-    "trainingInProgress": "Training In Progress",
+    "initiatingTraining": "Initiating Training",
     "trainingFailed": "Training Failed",
     "notTrained": "Not Trained"
   },
diff --git a/docker-compose.yml b/docker-compose.yml
index 4ca0f778..683363a7 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -188,7 +188,7 @@ services:
       - OLLAMA_HOST=0.0.0.0
     volumes:
       - dataset_gen_ollama_models:/root/.ollama
-      - ./src/dataset-generation/ollama-entrypoint.sh:/ollama-entrypoint.sh
+      - ./DSL/DatasetGenerator/ollama-entrypoint.sh:/ollama-entrypoint.sh
     entrypoint: ["bash", "/ollama-entrypoint.sh"]
     # deploy:
     #   resources:

From c29a1bb9bd0c3fa4c2f74d565c9502d182a6b2a0 Mon Sep 17 00:00:00 2001
From: erangi-ar <111747955+erangi-ar@users.noreply.github.com>
Date: Wed, 9 Jul 2025 00:53:11 +0530
Subject: [PATCH 079/195] feat: Implement production data model retrieval and
 update logic

---
 .../POST/get-data-models.sql                  |   1 +
 .../POST/get-production-data-model.sql        |   6 +
 .../POST/update-datamodel-deployment-env.sql  |  14 +
 .../global-classifier/GET/datamodels/list.yml |   5 +
 .../GET/datamodels/production-model.yml       |  20 ++
 .../POST/datamodels/create.yml                |  27 ++
 .../POST/datamodels/major.yml                 |  29 +-
 .../POST/datamodels/minor.yml                 |  29 +-
 .../molecules/DataModelCard/index.tsx         |   8 +-
 GUI/src/pages/DataModels/index.tsx            | 305 ++++++++++--------
 GUI/src/services/datamodels.ts                |   5 +
 GUI/src/types/dataModels.ts                   |   2 +-
 GUI/src/utils/endpoints.ts                    |   4 +-
 GUI/src/utils/queryKeys.ts                    |   1 +
 14 files changed, 304 insertions(+), 152 deletions(-)
 create mode 100644 DSL/Resql/global-classifier/POST/get-production-data-model.sql
 create mode 100644 DSL/Resql/global-classifier/POST/update-datamodel-deployment-env.sql
 create mode 100644 DSL/Ruuter.private/global-classifier/GET/datamodels/production-model.yml

diff --git a/DSL/Resql/global-classifier/POST/get-data-models.sql b/DSL/Resql/global-classifier/POST/get-data-models.sql
index f88b3207..aaed147b 100644
--- a/DSL/Resql/global-classifier/POST/get-data-models.sql
+++ b/DSL/Resql/global-classifier/POST/get-data-models.sql
@@ -22,6 +22,7 @@ WHERE
   (:training_status = 'all' OR training_status = :training_status::training_status)
   AND (:model_status = 'all' OR model_status = :model_status::model_status)
   AND (:deployment_env = 'all' OR deployment_env = :deployment_env::deployment_environment)
+  AND deployment_env != 'production'::deployment_environment
 ORDER BY
   CASE WHEN :sort_by = 'createdAt' AND :sort_type = 'asc' THEN created_timestamp END ASC,
   CASE WHEN :sort_by = 'createdAt' AND :sort_type = 'desc' THEN created_timestamp END DESC,
diff --git a/DSL/Resql/global-classifier/POST/get-production-data-model.sql b/DSL/Resql/global-classifier/POST/get-production-data-model.sql
new file mode 100644
index 00000000..91e8f4ea
--- /dev/null
+++ b/DSL/Resql/global-classifier/POST/get-production-data-model.sql
@@ -0,0 +1,6 @@
+SELECT *
+FROM public.data_models
+WHERE deployment_env = 'production'::deployment_environment
+  AND latest = true
+ORDER BY updated_timestamp DESC
+LIMIT 1;
\ No newline at end of file
diff --git a/DSL/Resql/global-classifier/POST/update-datamodel-deployment-env.sql b/DSL/Resql/global-classifier/POST/update-datamodel-deployment-env.sql
new file mode 100644
index 00000000..902f55bd
--- /dev/null
+++ b/DSL/Resql/global-classifier/POST/update-datamodel-deployment-env.sql
@@ -0,0 +1,14 @@
+UPDATE public.data_models
+SET
+    deployment_env = 'undeployed'::deployment_environment,
+    updated_timestamp = CURRENT_TIMESTAMP
+WHERE
+    deployment_env = 'production'::deployment_environment
+RETURNING
+    model_id,
+    model_group_key,
+    model_name,
+    major,
+    minor,
+    deployment_env,
+    updated_timestamp;
\ No newline at end of file
diff --git a/DSL/Ruuter.private/global-classifier/GET/datamodels/list.yml b/DSL/Ruuter.private/global-classifier/GET/datamodels/list.yml
index dfb033af..fa4a5c85 100644
--- a/DSL/Ruuter.private/global-classifier/GET/datamodels/list.yml
+++ b/DSL/Ruuter.private/global-classifier/GET/datamodels/list.yml
@@ -55,6 +55,11 @@ getAllDataModels:
   result: data_models_res
   next: return_result

+assign_result:
+  assign:
+    dataModels: ${data_models_res.response.body}
+  next: return_result
+
 return_result:
   return: ${data_models_res.response.body}
   next: end
\ No newline at end of file
diff --git a/DSL/Ruuter.private/global-classifier/GET/datamodels/production-model.yml b/DSL/Ruuter.private/global-classifier/GET/datamodels/production-model.yml
new file mode 100644
index 00000000..13eb90a4
--- /dev/null
+++ b/DSL/Ruuter.private/global-classifier/GET/datamodels/production-model.yml
@@ -0,0 +1,20 @@
+declaration:
+  call: declare
+  version: 0.1
+  description: "Get the current production data model"
+  method: get
+  accepts: json
+  returns: json
+  namespace: global-classifier
+
+getProductionModel:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_RESQL]/get-production-data-model"
+    body: {}
+  result: productionModelRes
+  next: return_result
+
+return_result:
+  return: ${productionModelRes.response.body}
+  next: end
\ No newline at end of file
diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml
index 792ac0c3..e10db9c6 100644
--- a/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml
+++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml
@@ -58,6 +58,28 @@ validateEnumValues:
       next: return_invalid_deployment_env
     - condition: ${!baseModels.every(model => ["distil-bert", "roberta", "bert"].includes(model))}
       next: return_invalid_base_model
+  next: checkProductionDeployment
+
+# Check if Production Deployment
+checkProductionDeployment:
+  switch:
+    - condition: ${deploymentEnv === "production"}
+      next: updateExistingProductionModels
+  next: insertModelMetadata
+
+# Update Existing Production Models to Undeployed
+updateExistingProductionModels:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_RESQL]/update-datamodel-deployment-env"
+  result: update_production_res
+  next: checkProductionUpdateResult
+
+# Check Production Update Result
+checkProductionUpdateResult:
+  switch:
+    - condition: ${!update_production_res || !update_production_res.response || !update_production_res.response.body}
+      next: return_production_update_failed
   next: insertModelMetadata

 # Insert Model Metadata
@@ -120,6 +142,11 @@ return_invalid_base_model:
   status: 400
   next: end

+return_production_update_failed:
+  return: "error: failed to update existing production models to undeployed before creating new production model"
+  status: 500
+  next: end
+
 return_insert_failed:
   return: "error: failed to create model metadata record"
   status: 500
diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml
index 6705ccec..5c788d8e 100644
--- a/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml
+++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml
@@ -61,6 +61,28 @@ validateEnumValues:
       next: return_invalid_deployment_env
     - condition: ${!baseModels.every(model => ["distil-bert", "roberta", "bert"].includes(model))}
       next: return_invalid_base_model
+  next: checkProductionDeployment
+
+# Check if Production Deployment
+checkProductionDeployment:
+  switch:
+    - condition: ${deploymentEnv === "production"}
+      next: updateExistingProductionModels
+  next: getLatestDataModel
+
+# Update Existing Production Models to Undeployed
+updateExistingProductionModels:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_RESQL]/update-datamodel-deployment-env"
+  result: update_production_res
+  next: checkProductionUpdateResult
+
+# Check Production Update Result
+checkProductionUpdateResult:
+  switch:
+    - condition: ${!update_production_res || !update_production_res.response || !update_production_res.response.body}
+      next: return_production_update_failed
   next: getLatestDataModel

 # Get Latest Data Model
@@ -152,7 +174,7 @@ checkDatasetUpdateResult:

 # Return Result
 returnResult:
-  return: ${insertResult.response.body[0]}
+  return: "Data model major version updated successfully"
   status: 200
   next: end

@@ -171,6 +193,11 @@ return_invalid_base_model:
   status: 400
   next: end

+return_production_update_failed:
+  return: "error: failed to update existing production models to undeployed before creating new production model"
+  status: 500
+  next: end
+
 return_insert_failed:
   return: "error: failed to create new model version"
   status: 500
diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml
index a99ed4d1..41512b33 100644
--- a/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml
+++ 
b/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml @@ -61,6 +61,28 @@ validateEnumValues: next: return_invalid_deployment_env - condition: ${!baseModels.every(model => ["distil-bert", "roberta", "bert"].includes(model))} next: return_invalid_base_model + next: checkProductionDeployment + +# Check if Production Deployment +checkProductionDeployment: + switch: + - condition: ${deploymentEnv === "production"} + next: updateExistingProductionModels + next: getLatestDataModel + +# Update Existing Production Models to Undeployed +updateExistingProductionModels: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/update-datamodel-deployment-env" + result: update_production_res + next: checkProductionUpdateResult + +# Check Production Update Result +checkProductionUpdateResult: + switch: + - condition: ${!update_production_res || !update_production_res.response || !update_production_res.response.body} + next: return_production_update_failed next: getLatestDataModel # Get Latest Data Model @@ -144,7 +166,7 @@ checkDatasetUpdateResult: # Return Result returnResult: - return: ${insertResult.response.body[0]} + return: "Data model minor version updated successfully" status: 200 next: end @@ -163,6 +185,11 @@ return_invalid_base_model: status: 400 next: end +return_production_update_failed: + return: "error: failed to update existing production models to undeployed before creating new production model" + status: 500 + next: end + return_insert_failed: return: "error: failed to create new model version" status: 500 diff --git a/GUI/src/components/molecules/DataModelCard/index.tsx b/GUI/src/components/molecules/DataModelCard/index.tsx index 88de104a..7b61357b 100644 --- a/GUI/src/components/molecules/DataModelCard/index.tsx +++ b/GUI/src/components/molecules/DataModelCard/index.tsx @@ -19,7 +19,7 @@ type DataModelCardProps = { lastTrained?: string; trainingStatus?: string; modelStatus?: string; - maturity?: string; + deploymentEnv?: string; results?: string | null; }; @@ -32,7 +32,7 @@ const DataModelCard: FC> = ({ lastTrained, trainingStatus, modelStatus, - maturity, + deploymentEnv, results, }) => { @@ -115,13 +115,13 @@ const configureDataModel = () => { {lastTrained && formatDate(new Date(lastTrained), 'D.M.yy-H:m')}
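Taken together, the create, major and minor flows above enforce a single rule: when the requested deploymentEnv is "production", every model currently marked production is first flipped to undeployed through update-datamodel-deployment-env, so at most one production model exists at a time, and get-production-data-model plus the list query's new "!= 'production'" filter keep the two views disjoint. A rough smoke test of that behaviour, assuming the private Ruuter instance is reachable on ruuter-private:8088 and that $COOKIE holds a valid customJwtCookie (both assumptions for illustration, not part of this patch):

    # Hypothetical host and cookie; adjust to your deployment.
    # Fetch the current production model (at most one row expected).
    curl -s -X GET "http://ruuter-private:8088/global-classifier/datamodels/production-model" \
      -H "Cookie: customJwtCookie=$COOKIE"
    # After creating or versioning a model with deploymentEnv "production",
    # repeat the call: the returned model should change, and the previous
    # production model should reappear in /datamodels/list as undeployed.
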

    -
    +
    {renderTrainingStatus(trainingStatus)} {isLatest && } - {renderMaturityLabel(maturity)} + {renderMaturityLabel(deploymentEnv)}
    diff --git a/GUI/src/pages/DataModels/index.tsx b/GUI/src/pages/DataModels/index.tsx index 8cd24f03..9d33c3bc 100644 --- a/GUI/src/pages/DataModels/index.tsx +++ b/GUI/src/pages/DataModels/index.tsx @@ -16,7 +16,7 @@ import { import { dataModelsQueryKeys } from 'utils/queryKeys'; import NoDataView from 'components/molecules/NoDataView'; import './DataModels.scss'; -import { getDataModelsOverview, getDeploymentEnvironments } from 'services/datamodels'; +import { getDataModelsOverview, getDeploymentEnvironments, getProductionDataModel } from 'services/datamodels'; import { fi } from 'date-fns/locale'; import { modelStatuses, trainingStatuses } from 'config/dataModelsConfig'; @@ -41,7 +41,12 @@ const DataModels: FC = () => { queryFn: () => getDataModelsOverview(pageIndex, filters.modelStatus, filters.trainingStatus, filters.deploymentEnvironment, filters.sort), }); - const { data: deploymentEnvironmentsData } = useQuery({ + const { data: prodDataModel, isLoading: isProdDataModelLoading } = useQuery({ + queryKey: dataModelsQueryKeys.GET_PROD_DATA_MODEL(), + queryFn: () => getProductionDataModel(), + }); + + const { data: deploymentEnvironmentsData } = useQuery({ queryKey: dataModelsQueryKeys.DATA_MODEL_DEPLOYMENT_ENVIRONMENTS(), queryFn: () => getDeploymentEnvironments(), }); @@ -60,10 +65,10 @@ const DataModels: FC = () => { return (
    -
    - {!isModelDataLoading ? ( -
    - {/*
    +
    + {!isModelDataLoading ? ( +
    + {/*
    {t('dataModels.productionModels')} @@ -71,150 +76,164 @@ const DataModels: FC = () => {
    */} -
    -
    -
    {t('dataModels.dataModels')}
    +
    +
    +
    {t('dataModels.dataModels')}
    + +
    +
    +
    + + + handleFilterChange('modelStatus', selection?.value ?? '') + } + defaultValue={filters?.modelStatus} + style={{ width: '15%' }} + /> + + + handleFilterChange('trainingStatus', selection?.value) + } + defaultValue={filters?.trainingStatus} + style={{ width: '15%' }} + /> + + handleFilterChange('deploymentEnvironment', selection?.value) + } + defaultValue={filters?.deploymentEnvironment} + style={{ width: '25%' }} + /> + + handleFilterChange('sort', selection?.value) + } + defaultValue={filters?.sort} + style={{ width: '25%' }} + />
    -
    -
    - - - handleFilterChange('modelStatus', selection?.value ?? '') - } - defaultValue={filters?.modelStatus} - style={{ width: '15%' }} - /> - - - handleFilterChange('trainingStatus', selection?.value) - } - defaultValue={filters?.trainingStatus} - style={{ width: '15%' }} - /> - - handleFilterChange('deploymentEnvironment', selection?.value) - } - defaultValue={filters?.deploymentEnvironment} - style={{ width: '25%' }} - /> - - handleFilterChange('sort', selection?.value) - } - defaultValue={filters?.sort} - style={{ width: '25%' }} - /> - -
    - {/*
    */} - - - {/*
    */} -
    - {dataModelsData?.length > 0 ? ( -
    - {dataModelsData?.map( - (model: DataModelResponse, index: number) => { - return ( - - ); - } - )} -
    - ) : ( - - )}
    - 1} - canNextPage={pageIndex < 10} - onPageChange={setPageIndex} - /> + {prodDataModel?.length !==0 &&
    +

    Deployed Model

    +
    +
    +
    } +

    Other Data Models

    + + {dataModelsData?.length > 0 ? ( +
    + {dataModelsData?.map( + (model: DataModelResponse, index: number) => { + return ( + + ); + } + )} +
    + ) : ( + + )}
    - ) : ( - - )} -
    + 1} + canNextPage={pageIndex < 10} + onPageChange={setPageIndex} + /> +
    + ) : ( + + )} +
    ); }; diff --git a/GUI/src/services/datamodels.ts b/GUI/src/services/datamodels.ts index 0d996d9a..843d35bc 100644 --- a/GUI/src/services/datamodels.ts +++ b/GUI/src/services/datamodels.ts @@ -27,6 +27,11 @@ export async function getDeploymentEnvironments() { return data?.response?? []; } +export async function getProductionDataModel() { + const { data } = await apiDev.get(dataModelsEndpoints.GET_PRODUCTION_DATA_MODEL()); + return data?.response?.[0]?? []; +} + export async function getDataModelMetadata( modelId: number | string, ) { diff --git a/GUI/src/types/dataModels.ts b/GUI/src/types/dataModels.ts index 2c4aacbd..8e39961f 100644 --- a/GUI/src/types/dataModels.ts +++ b/GUI/src/types/dataModels.ts @@ -62,7 +62,7 @@ export type DataModelResponse = { dataModelName: string; lastTrained: string; trainingStatus: string; - deploymentEnvironment: string; + deploymentEnv: string; modelStatus: string; trainingResults?: string | null; }; diff --git a/GUI/src/utils/endpoints.ts b/GUI/src/utils/endpoints.ts index ec850896..982a8abc 100644 --- a/GUI/src/utils/endpoints.ts +++ b/GUI/src/utils/endpoints.ts @@ -49,14 +49,14 @@ export const authEndpoints = { export const dataModelsEndpoints = { GET_OVERVIEW: (): string => '/global-classifier/datamodels/list', + GET_PRODUCTION_DATA_MODEL: (): string => '/global-classifier/datamodels/production-model', GET_MODEL_METADATA: (): string => '/global-classifier/datamodels/metadata', GET_DEPLOYMENT_ENVIRONMENTS: (): string => '/global-classifier/datamodels/configs/environments', CREATE_MODEL: (): string => '/global-classifier/datamodels/create', CREATE_MAJOR_VERSION: (): string => '/global-classifier/datamodels/major', CREATE_MINOR_VERSION: (): string => '/global-classifier/datamodels/minor', - - + GET_DATAMODELS_FILTERS: (): string => '/global-classifier/datamodel/overview/filters', GET_METADATA: (): string => `/global-classifier/datamodel/metadata`, diff --git a/GUI/src/utils/queryKeys.ts b/GUI/src/utils/queryKeys.ts index 71240af3..fae5887a 100644 --- a/GUI/src/utils/queryKeys.ts +++ b/GUI/src/utils/queryKeys.ts @@ -63,6 +63,7 @@ export const authQueryKeys = { export const dataModelsQueryKeys = { DATA_MODEL_FILTERS: (): string[] => ['datamodels/filters'], + GET_PROD_DATA_MODEL: (): string[] => ['datamodels/production-model'], DATA_MODEL_DEPLOYMENT_ENVIRONMENTS: (): string[] => ['datamodels/deployment-environments'], DATA_MODELS_OVERVIEW: function ( pageIndex?: number, From 1321e2794d5ad3f4be644b1f8f8143c1687bd6c9 Mon Sep 17 00:00:00 2001 From: erangi-ar <111747955+erangi-ar@users.noreply.github.com> Date: Wed, 9 Jul 2025 00:57:39 +0530 Subject: [PATCH 080/195] Update GUI/src/services/datamodels.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- GUI/src/services/datamodels.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GUI/src/services/datamodels.ts b/GUI/src/services/datamodels.ts index 843d35bc..c9e61ba6 100644 --- a/GUI/src/services/datamodels.ts +++ b/GUI/src/services/datamodels.ts @@ -29,7 +29,7 @@ export async function getDeploymentEnvironments() { export async function getProductionDataModel() { const { data } = await apiDev.get(dataModelsEndpoints.GET_PRODUCTION_DATA_MODEL()); - return data?.response?.[0]?? []; + return data?.response?.[0] ?? 
null; } export async function getDataModelMetadata( From c4ece0aa2d32aa10b7ce36f99f51ff8ec37a7cbb Mon Sep 17 00:00:00 2001 From: erangi-ar <111747955+erangi-ar@users.noreply.github.com> Date: Wed, 9 Jul 2025 00:57:48 +0530 Subject: [PATCH 081/195] Update GUI/src/pages/DataModels/index.tsx Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- GUI/src/pages/DataModels/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GUI/src/pages/DataModels/index.tsx b/GUI/src/pages/DataModels/index.tsx index 9d33c3bc..bcb9037b 100644 --- a/GUI/src/pages/DataModels/index.tsx +++ b/GUI/src/pages/DataModels/index.tsx @@ -175,7 +175,7 @@ const DataModels: FC = () => {
    - {prodDataModel?.length !==0 &&
    + {prodDataModel != null &&

    Deployed Model

    Date: Wed, 9 Jul 2025 00:57:59 +0530 Subject: [PATCH 082/195] Update GUI/src/components/FormElements/FormCheckboxes/index.tsx Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- GUI/src/components/FormElements/FormCheckboxes/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GUI/src/components/FormElements/FormCheckboxes/index.tsx b/GUI/src/components/FormElements/FormCheckboxes/index.tsx index e397e53c..47d8e235 100644 --- a/GUI/src/components/FormElements/FormCheckboxes/index.tsx +++ b/GUI/src/components/FormElements/FormCheckboxes/index.tsx @@ -38,7 +38,7 @@ const FormCheckboxes: FC = ({ const newValues = checked ? [...internalSelectedValues, value] - : internalSelectedValues?.filter((v: string) => v !== value); + : internalSelectedValues.filter((v: string) => v !== value); setInternalSelectedValues(newValues); From 7634aec775e618df5bf7d01bc8f30d7b5889c964 Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Fri, 11 Jul 2025 09:53:26 +0530 Subject: [PATCH 083/195] complete training pipeline --- .gitignore | 1 + DSL/CronManager/DSL/data_model.yml | 7 +- DSL/CronManager/DSL/data_resync.yml | 4 +- DSL/CronManager/DSL/data_sync.yml | 4 +- .../script/python_train_script_starter.sh | 197 -- .../script/train_script_starter.sh | 212 ++ ...ssifier-script-v11-model-training-jobs.sql | 25 + DSL/Liquibase/master.yml | 3 + .../check-training-job-status-in-progress.sql | 6 + .../get-data-model-info-by-given-model-id.sql | 5 + .../POST/get-first-come-training-job.sql | 9 + .../POST/insert-training-job-to-queue.sql | 15 + .../POST/update-training-job-status.sql | 3 + .../global-classifier/GET/.guard | 5 + .../POST/datamodel/retrain.yml | 90 - .../POST/datamodel/train.yml | 68 + docker-compose.yml | 41 +- sidecar.env | 2 +- src/model-training/Dockerfile.cpu | 57 - src/model-training/Dockerfile.gpu | 77 - src/model-training/__init__.py | 0 src/model-training/constants.py | 159 -- src/model-training/datapipeline.py | 276 -- src/model-training/model_trainer.py | 579 ----- src/model-training/model_trainer_api.py | 274 -- src/model-training/requirements-cpu.txt | 94 - src/model-training/requirements-gpu.txt | 99 - src/model-training/requirements.txt | 65 - src/model-training/s3_ferry.py | 46 - src/model-training/trainingpipeline.py | 800 ------ src/training/README.md | 516 +++- src/training/dataset_artifacts/3.json | 2230 ----------------- src/training/requirements.txt | 5 + src/training/scripts/constants.py | 22 + src/training/scripts/create_datasets.py | 438 ++-- src/training/scripts/create_datasets_v1.py | 333 --- src/training/scripts/evaluate.py | 611 ----- src/training/scripts/inference.py | 993 -------- src/training/scripts/mlflow_log.py | 763 ------ src/training/scripts/s3_ferry_service.py | 181 ++ src/training/scripts/s3_utility_handler.py | 193 ++ src/training/scripts/train.py | 1610 ++++++------ src/training/scripts/train_v1.py | 582 ----- src/training/scripts/utils.py | 905 +++---- src/training/training_entrypoint.sh | 46 + 45 files changed, 2783 insertions(+), 9868 deletions(-) delete mode 100755 DSL/CronManager/script/python_train_script_starter.sh create mode 100644 DSL/CronManager/script/train_script_starter.sh create mode 100644 DSL/Liquibase/changelog/global-classifier-script-v11-model-training-jobs.sql create mode 100644 DSL/Resql/global-classifier/POST/check-training-job-status-in-progress.sql create mode 100644 DSL/Resql/global-classifier/POST/get-data-model-info-by-given-model-id.sql create mode 100644 
DSL/Resql/global-classifier/POST/get-first-come-training-job.sql
 create mode 100644 DSL/Resql/global-classifier/POST/insert-training-job-to-queue.sql
 create mode 100644 DSL/Resql/global-classifier/POST/update-training-job-status.sql
 delete mode 100644 DSL/Ruuter.private/global-classifier/POST/datamodel/retrain.yml
 create mode 100644 DSL/Ruuter.private/global-classifier/POST/datamodel/train.yml
 delete mode 100644 src/model-training/Dockerfile.cpu
 delete mode 100644 src/model-training/Dockerfile.gpu
 delete mode 100644 src/model-training/__init__.py
 delete mode 100644 src/model-training/constants.py
 delete mode 100644 src/model-training/datapipeline.py
 delete mode 100644 src/model-training/model_trainer.py
 delete mode 100644 src/model-training/model_trainer_api.py
 delete mode 100644 src/model-training/requirements-cpu.txt
 delete mode 100644 src/model-training/requirements-gpu.txt
 delete mode 100644 src/model-training/requirements.txt
 delete mode 100644 src/model-training/s3_ferry.py
 delete mode 100644 src/model-training/trainingpipeline.py
 delete mode 100644 src/training/dataset_artifacts/3.json
 delete mode 100644 src/training/scripts/create_datasets_v1.py
 delete mode 100644 src/training/scripts/evaluate.py
 delete mode 100644 src/training/scripts/inference.py
 delete mode 100644 src/training/scripts/mlflow_log.py
 create mode 100644 src/training/scripts/s3_ferry_service.py
 create mode 100644 src/training/scripts/s3_utility_handler.py
 delete mode 100644 src/training/scripts/train_v1.py
 create mode 100644 src/training/training_entrypoint.sh

diff --git a/.gitignore b/.gitignore
index 3c62a8e7..ead17596 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,3 +48,4 @@ models
 data
 output_datasets/
 venv
+dataset_artifacts/
diff --git a/DSL/CronManager/DSL/data_model.yml b/DSL/CronManager/DSL/data_model.yml
index 7140558f..88c459e4 100644
--- a/DSL/CronManager/DSL/data_model.yml
+++ b/DSL/CronManager/DSL/data_model.yml
@@ -1,5 +1,6 @@
 model_trainer:
-  trigger: off
+  agency_data_sync:
+    trigger: "0 0/1 * * * ?"
+    # trigger: off
   type: exec
-  command: "../app/scripts/python_train_script_starter.sh"
-  allowedEnvs: ['cookie', 'modelId', 'datasetId']
+  command: "../app/scripts/train_script_starter.sh"
\ No newline at end of file
diff --git a/DSL/CronManager/DSL/data_resync.yml b/DSL/CronManager/DSL/data_resync.yml
index dc9c53cc..7ba86d3b 100644
--- a/DSL/CronManager/DSL/data_resync.yml
+++ b/DSL/CronManager/DSL/data_resync.yml
@@ -1,5 +1,5 @@
 agency_data_resync:
-  trigger: "0 0/1 * * * ?"
-  # trigger: off
+  # trigger: "0 0/1 * * * ?"
+  trigger: off
   type: exec
   command: "../app/scripts/agency_data_resync.sh -s 10"
diff --git a/DSL/CronManager/DSL/data_sync.yml b/DSL/CronManager/DSL/data_sync.yml
index fe85ca33..32b8cfa0 100644
--- a/DSL/CronManager/DSL/data_sync.yml
+++ b/DSL/CronManager/DSL/data_sync.yml
@@ -1,5 +1,5 @@
 agency_data_sync:
-  trigger: "0 0/1 * * * ?"
-  # trigger: off
+  # trigger: "0 0/1 * * * ?"
+  trigger: off
   type: exec
   command: "../app/scripts/agency_data_sync.sh -s 10"
diff --git a/DSL/CronManager/script/python_train_script_starter.sh b/DSL/CronManager/script/python_train_script_starter.sh
deleted file mode 100755
index 9bb21093..00000000
--- a/DSL/CronManager/script/python_train_script_starter.sh
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/bin/bash
-
-# DEFINING ENDPOINTS
-
-GET_MODEL_METADATA_ENDPOINT=http://ruuter-private:8088/classifier/datamodel/metadata
-CREATE_TRAINING_PROGRESS_SESSION_ENDPOINT=http://ruuter-private:8088/classifier/datamodel/progress/create
-UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT=http://ruuter-private:8088/classifier/datamodel/progress/update
-UPDATE_MODEL_METADATA_TRAINING_STATUS_ENDPOINT=http://ruuter-private:8088/classifier/datamodel/update/training/status
-
-# Send the request to the API and capture the output
-MODEL_METADATA_ENDPOINT="$GET_MODEL_METADATA_ENDPOINT?modelId=$newModelId"
-
-echo $GET_MODEL_METADATA_ENDPOINT
-
-echo "cookie"
-echo $cookie
-
-api_response=$(curl -s -H "Cookie: customJwtCookie=$cookie" -X GET "$MODEL_METADATA_ENDPOINT")
-
-echo $api_response
-
-# Check if the API response is valid
-if [ -z "$api_response" ]; then
-    echo "API request failed to get the model metadata."
-    exit 1
-fi
-
-deploymentEnv=$(echo $api_response | grep -o '"deploymentEnv":[^,]*' | sed 's/.*"deploymentEnv"://' | tr -d '"}')
-modelDetails=$api_response
-export deploymentEnv
-export modelDetails
-
-# Extract model details from the JSON response
-modelName=$(echo "$modelDetails" | grep -o '"modelName":[^,]*' | sed 's/.*"modelName"://' | tr -d '"}')
-majorVersion=$(echo "$modelDetails" | grep -o '"majorVersion":[^,]*' | sed 's/.*"majorVersion"://' | tr -d '"}')
-minorVersion=$(echo "$modelDetails" | grep -o '"minorVersion":[^,]*' | sed 's/.*"minorVersion"://' | tr -d '"}')
-latest=$(echo "$modelDetails" | grep -o '"latest":[^,]*' | sed 's/.*"latest"://' | tr -d '"}')
-
-# Construct payload to update training status using cat
-payload=$(cat </dev/null; then
+        echo "❌ [MISSING or failed import] Package '$pkg'"
+        missing_pkgs+=("$pkg")
+    else
+        echo "✅ [FOUND] Package '$pkg'"
+    fi
+done
+
+# Install if missing
+if [ ${#missing_pkgs[@]} -ne 0 ]; then
+    echo "⚡ [ACTION] Missing packages detected: ${missing_pkgs[*]}"
+
+    if ! command -v uv &>/dev/null; then
+        echo "⚡ Installing uv inside virtualenv..."
+        pip install uv || { echo "❌ Failed to install uv"; exit 1; }
+    else
+        echo "✅ uv already installed."
+    fi
+
+    if [ ! -f /app/src/training/requirements.txt ]; then
+        echo "❌ /app/src/training/requirements.txt not found!"
+        exit 1
+    fi
+
+    echo "📦 [INSTALL] Installing from /app/src/training/requirements.txt using uv..."
+    uv pip install -r /app/src/training/requirements.txt || {
+        echo "⚠️ uv install failed — trying pip as fallback..."
+        pip install -r /app/src/training/requirements.txt || {
+            echo "❌ Both uv and pip install failed inside virtualenv"
+            exit 1
+        }
+    }
+
+    echo "🎉 [SUCCESS] Required packages installed successfully inside virtualenv."
+else
+    echo "🎉 [SUCCESS] All required Python packages are already installed inside virtualenv."
+fi
+echo "✅ [VIRTUALENV] All checks passed, proceeding with training script..."
+echo "🚀 [TRAINING] Starting training for Model ID: $model_id, Dataset ID: $dataset_id"
+
+# Set up training parameters
+TRAINING_SCRIPT="/app/src/training/scripts/train.py"
+TRAINING_OUTPUT_DIR="/app/models"
+MLFLOW_TRACKING_URI="${MLFLOW_TRACKING_URI:-http://mlflow:5000}"
+PROCESSED_DATA_DIR="/app/data/processed"
+
+# Create output directory for this training job
+training_output_dir="${TRAINING_OUTPUT_DIR}/model_${model_id}"
+mkdir -p "$training_output_dir"
+
+# Set default training parameters (can be made configurable)
+max_seq_length=256
+num_epochs=3
+batch_size=8
+learning_rate=2e-5
+
+echo "📋 [PARAMS] Training parameters:"
+echo " - Dataset ID: $dataset_id"
+echo " - Model ID: $model_id"
+echo " - Model Type: $model_types"
+echo " - Output Dir: $training_output_dir"
+echo " - MLflow URI: $MLFLOW_TRACKING_URI"
+
+# Call the training script
+echo "🎓 [EXECUTE] Calling training script..."
+
+python3 "$TRAINING_SCRIPT" \
+    --model_types "$model_types" \
+    --model_id "$model_id" \
+    --job_id "$job_id" \
+    --dataset_id "$dataset_id" \
+    --data_dir "$PROCESSED_DATA_DIR" \
+    --output_dir "$training_output_dir" \
+    --mlflow_tracking_uri "$MLFLOW_TRACKING_URI" \
+    --num_epochs "$num_epochs" \
+    --batch_size "$batch_size" \
+    --learning_rate "$learning_rate" \
+    --max_seq_length "$max_seq_length" \
+    --seed 42
+
+training_exit_code=$?
+
+# Check training result
+if [ $training_exit_code -eq 0 ]; then
+    echo "🎉 [SUCCESS] Training completed successfully"
+    echo "📁 [OUTPUT] Training outputs saved to: $training_output_dir"
+else
+    echo "❌ [FAILED] Training failed with exit code: $training_exit_code"
+    exit 1
+fi
+
+echo "✅ [DONE] Training script starter completed"
\ No newline at end of file
diff --git a/DSL/Liquibase/changelog/global-classifier-script-v11-model-training-jobs.sql b/DSL/Liquibase/changelog/global-classifier-script-v11-model-training-jobs.sql
new file mode 100644
index 00000000..0aedc47e
--- /dev/null
+++ b/DSL/Liquibase/changelog/global-classifier-script-v11-model-training-jobs.sql
@@ -0,0 +1,25 @@
+-- liquibase formatted sql
+
+-- changeset charith:global-classifier-model-training-jobs-enum
+-- Create training job status enum
+CREATE TYPE training_job_status AS ENUM ('queued', 'training-in-progress', 'trained');
+
+-- changeset charith:global-classifier-model-training-jobs-table
+CREATE TABLE public.model_training_jobs (
+    job_id BIGSERIAL PRIMARY KEY,
+    created_at INTEGER NOT NULL,
+    model_id BIGINT NOT NULL,
+    job_status training_job_status NOT NULL DEFAULT 'queued',
+
+    -- Add foreign key constraint to data_models table
+    CONSTRAINT fk_model_training_jobs_model_id
+        FOREIGN KEY (model_id)
+        REFERENCES public.data_models(model_id)
+        ON DELETE CASCADE
+);
+
+-- changeset charith:global-classifier-model-training-jobs-indexes
+-- Create indexes separately
+CREATE INDEX idx_model_training_jobs_model_id ON public.model_training_jobs (model_id);
+CREATE INDEX idx_model_training_jobs_status ON public.model_training_jobs (job_status);
+CREATE INDEX idx_model_training_jobs_created_at ON public.model_training_jobs (created_at);
\ No newline at end of file
diff --git a/DSL/Liquibase/master.yml b/DSL/Liquibase/master.yml
index a5c8bd42..fdd40071 100644
--- a/DSL/Liquibase/master.yml
+++ b/DSL/Liquibase/master.yml
@@ -19,4 +19,7 @@ databaseChangeLog:
       file: changelog/global-classifier-script-v9-data-models.sql
   - include:
       file: changelog/global-classifier-script-v10-datasets-metadata.sql
+  - include:
+      file: changelog/global-classifier-script-v11-model-training-jobs.sql
+
diff --git a/DSL/Resql/global-classifier/POST/check-training-job-status-in-progress.sql b/DSL/Resql/global-classifier/POST/check-training-job-status-in-progress.sql
new file mode 100644
index 00000000..2d37f8d5
--- /dev/null
+++ b/DSL/Resql/global-classifier/POST/check-training-job-status-in-progress.sql
@@ -0,0 +1,6 @@
+SELECT
+    EXISTS(
+        SELECT 1
+        FROM model_training_jobs
+        WHERE job_status = 'training-in-progress'
+    ) AS has_training_in_progress;
\ No newline at end of file
diff --git a/DSL/Resql/global-classifier/POST/get-data-model-info-by-given-model-id.sql b/DSL/Resql/global-classifier/POST/get-data-model-info-by-given-model-id.sql
new file mode 100644
index 00000000..656e5b21
--- /dev/null
+++ b/DSL/Resql/global-classifier/POST/get-data-model-info-by-given-model-id.sql
@@ -0,0 +1,5 @@
+SELECT
+    dataset_id,
+    base_models
+FROM data_models
+WHERE model_id = :model_id;
\ No newline at end of file
diff --git a/DSL/Resql/global-classifier/POST/get-first-come-training-job.sql b/DSL/Resql/global-classifier/POST/get-first-come-training-job.sql
new file mode 100644
index 00000000..327f9452
--- /dev/null
+++ b/DSL/Resql/global-classifier/POST/get-first-come-training-job.sql
@@ -0,0 +1,9 @@
+SELECT
+    job_id,
+    model_id,
+    job_status,
+    created_at
+FROM model_training_jobs
+WHERE job_status = 'queued'
+ORDER BY created_at ASC
+LIMIT 1;
\ No newline at end of file
diff --git a/DSL/Resql/global-classifier/POST/insert-training-job-to-queue.sql b/DSL/Resql/global-classifier/POST/insert-training-job-to-queue.sql
new file mode 100644
index 00000000..1803b820
--- /dev/null
+++ b/DSL/Resql/global-classifier/POST/insert-training-job-to-queue.sql
@@ -0,0 +1,15 @@
+INSERT INTO model_training_jobs (
+    model_id,
+    job_status,
+    created_at
+) VALUES (
+    :model_id,
+    'queued'::training_job_status,
+    EXTRACT(EPOCH FROM NOW())::INTEGER
+)
+RETURNING
+    job_id,
+    model_id,
+    job_status,
+    created_at,
+    'Training job successfully queued' AS message;
\ No newline at end of file
diff --git a/DSL/Resql/global-classifier/POST/update-training-job-status.sql b/DSL/Resql/global-classifier/POST/update-training-job-status.sql
new file mode 100644
index 00000000..e2aa9eb4
--- /dev/null
+++ b/DSL/Resql/global-classifier/POST/update-training-job-status.sql
@@ -0,0 +1,3 @@
+UPDATE public.model_training_jobs
+SET job_status = :jobStatus::training_job_status
+WHERE job_id = :jobId;
\ No newline at end of file
diff --git a/DSL/Ruuter.private/global-classifier/GET/.guard b/DSL/Ruuter.private/global-classifier/GET/.guard
index 9d21179b..60ad9726 100644
--- a/DSL/Ruuter.private/global-classifier/GET/.guard
+++ b/DSL/Ruuter.private/global-classifier/GET/.guard
@@ -10,6 +10,11 @@ authenticate:
     headers:
       cookie: ${incoming.headers.cookie}
   result: authority_result
+  next: log_cookie
+
+log_cookie:
+  log: "Cookie received: ${incoming.headers.cookie}"
+  next: check_authority_result

 check_authority_result:
   switch:
diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodel/retrain.yml b/DSL/Ruuter.private/global-classifier/POST/datamodel/retrain.yml
deleted file mode 100644
index eeb95d64..00000000
--- a/DSL/Ruuter.private/global-classifier/POST/datamodel/retrain.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-declaration:
-  call: declare
-  version: 0.1
-  description: "Description placeholder for 'RE-TRAIN'"
-  method: post
-  accepts: json
-  returns: json
-  namespace: global-classifier
-  allowlist:
-    body:
-      - field: modelId
-        type: number
-        description: "Body field 'modelId'"
-      - field: datasetId
-        type: number
-        description: "Body field 'datasetId'"
-    headers:
-      - field: cookie
-        type: string
-        description: "Cookie field"
-
-extract_request_data:
-  assign:
-    model_id: ${incoming.body.modelId}
-    dataset_id: ${incoming.body.datasetId}
-    cookie: ${incoming.headers.cookie}
-  next: check_for_request_data
-
-check_for_request_data:
-  switch:
-    - condition: ${model_id !== null} && ${dataset_id !== null}
-      next: update_data_model_status
-  next: return_incorrect_request
-
-update_data_model_status:
-  call: http.post
-  args:
-    url: "[#CLASSIFIER_RESQL]/update-data-model-status-by-model-id"
-    body:
-      id: ${model_id}
-  result: res_model
-  next: check_data_model_status
-
-data_model_status_updated:
-  switch:
-    - condition: ${200 <= res_model.response.statusCodeValue && res_model.response.statusCodeValue < 300}
-      next: execute_cron_manager
-  next: assign_fail_response
-
-execute_cron_manager:
-  call: http.post
-  args:
-    url: "[#CLASSIFIER_CRON_MANAGER]/execute/data_model/model_trainer"
-    query:
-      cookie: ${cookie.replace('customJwtCookie=','')} #Removing the customJwtCookie phrase from payload to to send cookie token only
-      modelId: ${model_id}
-      datasetId: ${dataset_id}
-  result: res
-  next: assign_success_response
-
-assign_success_response:
-  assign:
-    format_res: {
-      modelId: '${model_id}',
-      operationSuccessful: true,
-    }
-  next: return_ok
-
-assign_fail_response:
-  assign:
-    format_res: {
-      modelId: '${model_id}',
-      operationSuccessful: false,
-    }
-  next: return_bad_request
-
-return_ok:
-  status: 200
-  return: ${format_res}
-  next: end
-
-return_incorrect_request:
-  status: 400
-  return: 'Missing Required Fields'
-  next: end
-
-return_bad_request:
-  status: 400
-  return: ${format_res}
-  next: end
\ No newline at end of file
diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodel/train.yml b/DSL/Ruuter.private/global-classifier/POST/datamodel/train.yml
new file mode 100644
index 00000000..d18e2675
--- /dev/null
+++ b/DSL/Ruuter.private/global-classifier/POST/datamodel/train.yml
@@ -0,0 +1,68 @@
+declaration:
+  call: declare
+  version: 0.1
+  description: "Queue a training job for the given data model"
+  method: post
+  accepts: json
+  returns: json
+  namespace: global-classifier
+  allowlist:
+    body:
+      - field: modelId
+        type: number
+        description: "Body field 'modelId'"
+
+extract_request_data:
+  assign:
+    model_id: ${incoming.body.modelId}
+  next: check_for_request_data
+
+check_for_request_data:
+  switch:
+    - condition: ${model_id !== null}
+      next: create_training_job_in_queue
+  next: return_incorrect_request
+
+create_training_job_in_queue:
+  call: http.post
+  args:
+    url: "[#GLOBAL_CLASSIFIER_RESQL]/insert-training-job-to-queue"
+    body:
+      model_id: ${model_id}
+  result: res_model
+  next: log_result
+
+log_result:
+  log: "Training job creation response: ${res_model.response.statusCodeValue} - ${res_model.response.body}"
+  next: check_insert_result
+
+check_insert_result:
+  switch:
+    - condition: ${200 <= res_model.response.statusCodeValue && res_model.response.statusCodeValue < 300}
+      next: assign_success_response
+  next: assign_fail_response
+
+assign_success_response:
+  assign:
+    success_format_res:
+      modelId: ${model_id}
+      operationSuccessful: true
+      message: "Training job successfully queued"
+      jobId: ${res_model.response.body[0].jobId}
+  next: return_ok
+
+assign_fail_response:
+  assign:
+    fail_format_res:
+      modelId: ${model_id}
+      operationSuccessful: false
+      message: "Failed to queue training job"
+  next: return_bad_request
+
+return_ok:
+  status: 200
+  return: ${success_format_res}
+
+return_bad_request:
+  status: 400
+  return: 
${fail_format_res} \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index ee38dd61..8b60d8c8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -145,22 +145,35 @@ services: cron-manager: container_name: cron-manager image: cron-manager-python:latest - user: "root" volumes: - ./DSL/CronManager/DSL:/DSL - ./DSL/CronManager/script:/app/scripts - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets - ./src/s3_dataset_processor:/app/src/s3_dataset_processor - ./DSL/DatasetGenerator/config:/app/config + - ./src/training:/app/src/training + - ./mlflow/mlflow_artifacts:/mlflow/mlflow_artifacts + - ./data/processed:/app/data/processed + - shared-volume:/shared - cron_data:/app/data + - ./src/training/dataset_artifacts:/app/src/training/dataset_artifacts + - ./models:/app/models + - ./src/training/logs:/app/src/training/logs + runtime: nvidia environment: + - NVIDIA_VISIBLE_DEVICES=all - server.port=9010 + - MLFLOW_TRACKING_URI=http://mlflow:5000 + - PYTHONPATH=/app:/app/src/training:/app/src/training/scripts:/app/src/s3_dataset_processor:/app/src ports: - 9010:8080 networks: - bykstack depends_on: - init + - gc-s3-ferry + - mlflow + # classifier-service: # container_name: classifier-service @@ -188,7 +201,7 @@ services: - OLLAMA_HOST=0.0.0.0 volumes: - dataset_gen_ollama_models:/root/.ollama - - ./src/dataset-generation/ollama-entrypoint.sh:/ollama-entrypoint.sh + - ./DSL/DatasetGenerator/ollama-entrypoint.sh:/ollama-entrypoint.sh entrypoint: ["bash", "/ollama-entrypoint.sh"] # deploy: # resources: @@ -223,23 +236,25 @@ services: - bykstack mlflow: - image: synthesisai/dataset-generator-mlflow:latest + build: + context: ./mlflow + dockerfile: Dockerfile container_name: mlflow ports: - "5000:5000" env_file: - sidecar.env environment: - - MLFLOW_TRACKING_USERNAME=${MLFLOW_TRACKING_USERNAME} - - MLFLOW_TRACKING_PASSWORD=${MLFLOW_TRACKING_PASSWORD} - - MLFLOW_HOST=${MLFLOW_HOST} - - MLFLOW_PORT=${MLFLOW_PORT} - - MLFLOW_BACKEND_STORE_URI=${MLFLOW_BACKEND_STORE_URI} - - MLFLOW_DEFAULT_ARTIFACT_ROOT=${MLFLOW_DEFAULT_ARTIFACT_ROOT} - - MLFLOW_FLASK_SERVER_SECRET_KEY=${MLFLOW_FLASK_SERVER_SECRET_KEY} + - MLFLOW_TRACKING_USERNAME=${MLFLOW_TRACKING_USERNAME} + - MLFLOW_TRACKING_PASSWORD=${MLFLOW_TRACKING_PASSWORD} + - MLFLOW_HOST=${MLFLOW_HOST} + - MLFLOW_PORT=${MLFLOW_PORT} + - MLFLOW_BACKEND_STORE_URI=${MLFLOW_BACKEND_STORE_URI} + - MLFLOW_DEFAULT_ARTIFACT_ROOT=${MLFLOW_DEFAULT_ARTIFACT_ROOT} + - MLFLOW_FLASK_SERVER_SECRET_KEY=${MLFLOW_FLASK_SERVER_SECRET_KEY} volumes: - - ./DSL/DatasetGenerator/mlflow_data:/mlflow/mlflow_data - - ./DSL/DatasetGenerator/mlflow_artifacts:/mlflow/mlflow_artifacts + - ./mlflow/mlflow_data:/mlflow/mlflow_data + - ./mlflow/mlflow_artifacts:/mlflow/mlflow_artifacts networks: - bykstack @@ -267,6 +282,8 @@ services: container_name: gc-s3-ferry volumes: - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets + - ./src/training/dataset_artifacts:/app/src/training/dataset_artifacts + - ./models:/app/models env_file: - config.env ports: diff --git a/sidecar.env b/sidecar.env index dda8fe4a..7c49bc0b 100644 --- a/sidecar.env +++ b/sidecar.env @@ -1,6 +1,6 @@ # MLFLOW MLFLOW_TRACKING_USERNAME=mlflowadmin -MLFLOW_TRACKING_PASSWORD=mlflowadmin +MLFLOW_TRACKING_PASSWORD=value MLFLOW_HOST_PORT=5000 MLFLOW_CONT_PORT=5000 MLFLOW_HOST=0.0.0.0 diff --git a/src/model-training/Dockerfile.cpu b/src/model-training/Dockerfile.cpu deleted file mode 100644 index a3742640..00000000 --- a/src/model-training/Dockerfile.cpu +++ /dev/null @@ -1,57 +0,0 
@@ -FROM python:3.11-slim - -ENV PYTHONUNBUFFERED=1 \ - DEBIAN_FRONTEND=noninteractive \ - PIP_NO_CACHE_DIR=1 \ - PIP_DISABLE_PIP_VERSION_CHECK=1 - -# Install system dependencies -RUN apt-get update && \ - apt-get install -y \ - curl \ - git \ - unzip \ - wget \ - build-essential \ - gcc \ - g++ \ - && rm -rf /var/lib/apt/lists/* - -# Set working directory -WORKDIR /app - -# Copy requirements first for better Docker layer caching -COPY requirements-cpu.txt /app/ - -# Upgrade pip and install Python dependencies -RUN pip install --upgrade pip - -# Install CPU-specific PyTorch and dependencies -RUN pip install --no-cache-dir -r requirements-cpu.txt --timeout=1000 - -# Copy the rest of the application -COPY . /app - -# Create required directories -RUN mkdir -p /shared /cache /app/outputs /app/model_trainer - -# Create non-root user for security -RUN useradd --create-home --shell /bin/bash app \ - && chown -R app:app /app /shared /cache -USER root - -# Expose the port the FastAPI app will run on -EXPOSE 8900 - -# Set environment variables -ENV HF_HOME=/cache/ \ - PYTHONPATH=/app \ - TORCH_HOME=/cache/torch \ - TRANSFORMERS_CACHE=/cache/transformers - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ - CMD curl -f http://localhost:8900/model_checker/ || exit 1 - -# Set the entry point to run the FastAPI server -CMD ["uvicorn", "model_trainer_api:app", "--host", "0.0.0.0", "--port", "8900"] \ No newline at end of file diff --git a/src/model-training/Dockerfile.gpu b/src/model-training/Dockerfile.gpu deleted file mode 100644 index b3d86c51..00000000 --- a/src/model-training/Dockerfile.gpu +++ /dev/null @@ -1,77 +0,0 @@ -FROM nvidia/cuda:12.1-devel-ubuntu20.04 - -ENV PYTHONUNBUFFERED=1 \ - DEBIAN_FRONTEND=noninteractive \ - PIP_NO_CACHE_DIR=1 \ - PIP_DISABLE_PIP_VERSION_CHECK=1 - -# Install system dependencies -RUN apt-get update && \ - apt-get install -y \ - software-properties-common && \ - add-apt-repository ppa:deadsnakes/ppa && \ - apt-get update && \ - apt-get install -y \ - curl \ - git \ - python3.11 \ - python3.11-dev \ - python3.11-distutils \ - unzip \ - wget \ - build-essential \ - gcc \ - g++ \ - nvidia-cuda-toolkit \ - && rm -rf /var/lib/apt/lists/* - -# Install pip for Python 3.11 -RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11 - -# Set Python 3.11 as the default python3 -RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 -RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 - -# Set working directory -WORKDIR /app - -# Copy requirements first for better Docker layer caching -COPY requirements-gpu.txt /app/ - -# Upgrade pip and install GPU-specific dependencies -RUN python3.11 -m pip install --upgrade pip - -# Install GPU-specific PyTorch and dependencies -RUN python3.11 -m pip install --no-cache-dir -r requirements-gpu.txt --timeout=2000 - -# Copy the rest of the application -COPY . 
/app - -# Create required directories -RUN mkdir -p /shared /cache /app/outputs /app/model_trainer - -# Create non-root user for security (but allow GPU access) -RUN useradd --create-home --shell /bin/bash app \ - && usermod -a -G video app \ - && chown -R app:app /app /shared /cache -USER app - -# Expose the port the FastAPI app will run on -EXPOSE 8900 - -# Set environment variables for GPU optimization -ENV HF_HOME=/cache/ \ - PYTHONPATH=/app \ - TORCH_HOME=/cache/torch \ - TRANSFORMERS_CACHE=/cache/transformers \ - CUDA_VISIBLE_DEVICES=0 \ - NVIDIA_VISIBLE_DEVICES=all \ - NVIDIA_DRIVER_CAPABILITIES=compute,utility \ - TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \ - CMD curl -f http://localhost:8900/model_checker/ || exit 1 - -# Set the entry point to run the FastAPI server -CMD ["uvicorn", "model_trainer_api:app", "--host", "0.0.0.0", "--port", "8900"] \ No newline at end of file diff --git a/src/model-training/__init__.py b/src/model-training/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/model-training/constants.py b/src/model-training/constants.py deleted file mode 100644 index 54ba37b8..00000000 --- a/src/model-training/constants.py +++ /dev/null @@ -1,159 +0,0 @@ -DATA_DOWNLOAD_ENDPOINT = "http://file-handler:8000/datasetgroup/data/download/json" - -GET_DATASET_METADATA_ENDPOINT = ( - "http://ruuter-private:8088/classifier/datasetgroup/group/metadata" -) - -GET_MODEL_METADATA_ENDPOINT = "http://ruuter-private:8088/classifier/datamodel/metadata" - -UPDATE_MODEL_TRAINING_STATUS_ENDPOINT = ( - "http://ruuter-private:8088/classifier/datamodel/update/training/status" -) - -CREATE_TRAINING_PROGRESS_SESSION_ENDPOINT = ( - "http://ruuter-private:8088/classifier/datamodel/progress/create" -) - -UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT = ( - "http://ruuter-private:8088/classifier/datamodel/progress/update" -) - -TEST_DEPLOYMENT_ENDPOINT = ( - "http://deployment-service:8003/classifier/datamodel/deployment/testing/update" -) - -TRAINING_LOGS_PATH = "/app/model_trainer/training_logs.log" - -MODEL_RESULTS_PATH = "/shared/model_trainer/results" # stored in the shared folder which is connected to s3-ferry - -LOCAL_BASEMODEL_TRAINED_LAYERS_SAVE_PATH = "/shared/model_trainer/results/{model_id}/trained_base_model_layers" # stored in the shared folder which is connected to s3-ferry - -LOCAL_CLASSIFICATION_LAYER_SAVE_PATH = "/shared/model_trainer/results/{model_id}/classifier_layers" # stored in the shared folder which is connected to s3-ferry - -LOCAL_LABEL_ENCODER_SAVE_PATH = "/shared/model_trainer/results/{model_id}/label_encoders" # stored in the shared folder which is connected to s3-ferry - -S3_FERRY_MODEL_STORAGE_PATH = "/models" # folder path in s3 bucket - -S3_FERRY_ENDPOINT = "http://s3-ferry:3000/v1/files/copy" - -BASE_MODEL_FILENAME = "base_model_trainable_layers_{model_id}" - -CLASSIFIER_MODEL_FILENAME = "classifier_{model_id}.pth" - -MODEL_TRAINING_IN_PROGRESS = "training in-progress" - -MODEL_TRAINING_SUCCESSFUL = "trained" - -MODEL_TRAINING_FAILED = "not trained" - - -# MODEL TRAINING PROGRESS SESSION CONSTANTS - -INITIATING_TRAINING_PROGRESS_STATUS = "Initiating Training" - -TRAINING_IN_PROGRESS_PROGRESS_STATUS = "Training In-Progress" - -DEPLOYING_MODEL_PROGRESS_STATUS = "Deploying Model" - -MODEL_TRAINED_AND_DEPLOYED_PROGRESS_STATUS = "Model Trained And Deployed" - - -INITIATING_TRAINING_PROGRESS_MESSAGE = "Download and preparing dataset" - 
-TRAINING_IN_PROGRESS_PROGRESS_MESSAGE = ( - "The dataset is being trained on all selected models" -) - -DEPLOYING_MODEL_PROGRESS_MESSAGE = ( - "Model training complete. The trained model is now being deployed" -) - -MODEL_TRAINED_AND_DEPLOYED_PROGRESS_MESSAGE = ( - "The model was trained and deployed successfully to the environment" -) - -MODEL_TRAINING_FAILED_ERROR = "Training Failed" - - -INITIATING_TRAINING_PROGRESS_PERCENTAGE = 30 - -TRAINING_IN_PROGRESS_PROGRESS_PERCENTAGE = 50 - -DEPLOYING_MODEL_PROGRESS_PERCENTAGE = 80 - -MODEL_TRAINED_AND_DEPLOYED_PROGRESS_PERCENTAGE = 100 - - -# Supported Models for Testing -SUPPORTED_BASE_MODELS = ["estbert", "xlm-roberta", "multilingual-distilbert"] - - -SUPPORTED_BASE_MODELS = ["estbert", "xlm-roberta", "multilingual-distilbert"] - -# Model configurations -MODEL_CONFIGS = { - "estbert": { - "model_name": "tartuNLP/EstBERT", - "tokenizer_name": "tartuNLP/EstBERT", - "type": "bert", - }, - "xlm-roberta": { - "model_name": "xlm-roberta-base", - "tokenizer_name": "xlm-roberta-base", - "type": "roberta", - }, - "multilingual-distilbert": { - "model_name": "distilbert-base-multilingual-cased", - "tokenizer_name": "distilbert-base-multilingual-cased", - "type": "distilbert", - }, -} - -# OOD Training configurations -SUPPORTED_OOD_METHODS = ["energy", "sngp", "softmax"] - -# OOD Default parameters -DEFAULT_OOD_CONFIGS = { - "energy": { - "energy_temp": 1.0, - "energy_margin": 10.0, - "energy_weight": 0.1, - "use_energy_loss": True, - }, - "sngp": { - "spec_norm_bound": 0.9, - "gp_hidden_dim": 128, - "gp_scale": 2.0, - "gp_bias": 0.0, - "gp_input_normalization": True, - "gp_random_feature_type": "orf", - "gp_cov_momentum": 0.999, - "gp_cov_ridge_penalty": 1e-3, - }, - "softmax": {"temperature": 1.0, "use_entropy": True, "calibrate": False}, -} - -# Training parameters -DEFAULT_TRAINING_ARGS = { - "num_train_epochs": 4, - "per_device_train_batch_size": 8, - "per_device_eval_batch_size": 8, - "learning_rate": 2e-5, - "warmup_steps": 100, - "weight_decay": 0.01, - "logging_steps": 50, - "eval_strategy": "epoch", - "save_strategy": "epoch", - "load_best_model_at_end": True, - "metric_for_best_model": "accuracy", -} - - -# Data pipeline constants -MIN_SAMPLES_PER_CLASS = 10 -TARGET_SAMPLES_FOR_SMALL_DATASETS = 50 -TEST_SIZE_RATIO = 0.2 - -# Model evaluation constants -ACCURACY_WEIGHT = 0.7 -F1_WEIGHT = 0.3 diff --git a/src/model-training/datapipeline.py b/src/model-training/datapipeline.py deleted file mode 100644 index 50e1ad51..00000000 --- a/src/model-training/datapipeline.py +++ /dev/null @@ -1,276 +0,0 @@ -import pandas as pd -import requests -from constants import ( - DATA_DOWNLOAD_ENDPOINT, - GET_DATASET_METADATA_ENDPOINT, -) -from loguru import logger -import sys - -logger.remove() -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") - - -class DataPipeline: - def __init__(self, dg_id, cookie): - logger.info(f"DOWNLOADING DATASET WITH DGID - {dg_id}") - - cookies = {"customJwtCookie": cookie} - - # Download dataset - response = requests.get( - DATA_DOWNLOAD_ENDPOINT, params={"dgId": dg_id}, cookies=cookies - ) - - if response.status_code == 200: - logger.info("DATA DOWNLOAD SUCCESSFUL") - data = response.json() - df = pd.DataFrame(data) - - # Remove rowId if it exists - if "rowId" in df.columns: - df = df.drop("rowId", axis=1) - - self.df = df - logger.info(f"Downloaded dataset with {len(df)} samples") - logger.info(f"Dataset columns: {list(df.columns)}") - else: - logger.error( - f"DATA DOWNLOAD FAILED WITH ERROR CODE: 
{response.status_code}" - ) - logger.error(f"RESPONSE: {response.text}") - raise RuntimeError(f"ERROR RESPONSE {response.text}") - - # Get dataset metadata - logger.info("****** Getting Dataset Metadata ******") - logger.info(f"Endpoint: {GET_DATASET_METADATA_ENDPOINT}") - - response_hierarchy = requests.get( - GET_DATASET_METADATA_ENDPOINT, params={"groupId": dg_id}, cookies=cookies - ) - - if response_hierarchy.status_code == 200: - logger.info("DATASET METADATA RETRIEVAL SUCCESSFUL") - hierarchy = response_hierarchy.json() - self.hierarchy = hierarchy["response"]["data"][0] - logger.info( - f"Retrieved metadata for dataset: {self.hierarchy.get('name', 'Unknown')}" - ) - else: - logger.error( - f"DATASET METADATA RETRIEVAL FAILED: {response_hierarchy.status_code}" - ) - logger.error(f"RESPONSE: {response_hierarchy.text}") - raise RuntimeError(f"ERROR RESPONSE\n {response_hierarchy.text}") - - def extract_input_columns(self): - """Extract input columns from validation rules""" - validation_rules = self.hierarchy["validationCriteria"]["validationRules"] - input_columns = [ - key for key, value in validation_rules.items() if not value["isDataClass"] - ] - logger.info(f"Input columns identified: {input_columns}") - return input_columns - - def extract_target_column(self): - """Extract the target column from validation rules""" - validation_rules = self.hierarchy["validationCriteria"]["validationRules"] - target_columns = [ - key for key, value in validation_rules.items() if value["isDataClass"] - ] - - if not target_columns: - logger.error("No target column found in validation rules") - raise ValueError("No target column found in validation rules") - - target_column = target_columns[0] - logger.info(f"Target column identified: {target_column}") - return target_column - - def models_and_filters(self): - """ - Create models and filters for flat classification. - This maintains compatibility with the original hierarchical interface - but returns simplified structures for flat classification. 
- """ - target_column = self.extract_target_column() - unique_classes = sorted(self.df[target_column].unique().tolist()) - - # Create simple model structure for compatibility - models = [{1: unique_classes}] # Single model with all classes - filters = [unique_classes] # Single filter with all classes - - logger.info("Flat classification setup:") - logger.info(f" Target column: {target_column}") - logger.info(f" Classes ({len(unique_classes)}): {unique_classes}") - logger.info( - f" Class distribution: {self.df[target_column].value_counts().to_dict()}" - ) - - return models, filters - - def create_dataframes(self): - """Create dataframes for flat classification""" - logger.info("CREATING DATAFRAME FOR FLAT CLASSIFICATION") - - try: - input_columns = self.extract_input_columns() - target_column = self.extract_target_column() - - df = self.df.copy() - - # Create input text by combining input columns - if len(input_columns) == 1: - # Single input column - use directly - input_col = input_columns[0] - df["input"] = df[input_col].astype(str) - logger.info(f"Using single input column: {input_col}") - else: - # Multiple input columns - combine them - df["input"] = df[input_columns].apply( - lambda row: " ".join(row.dropna().astype(str)), axis=1 - ) - logger.info(f"Combined input columns: {input_columns}") - - # Set target column - df = df.rename(columns={target_column: "target"}) - - # Keep only input and target columns, remove any NaN values - df = df[["input", "target"]].dropna() - - # Validate the data - if len(df) == 0: - raise ValueError("No valid data samples after preprocessing") - - # Check for empty inputs - empty_inputs = df[df["input"].str.strip() == ""].shape[0] - if empty_inputs > 0: - logger.warning( - f"Found {empty_inputs} empty input samples, removing them" - ) - df = df[df["input"].str.strip() != ""] - - # Final validation - if len(df) == 0: - raise ValueError("No valid data samples after cleaning") - - unique_classes = df["target"].unique() - class_counts = df["target"].value_counts() - - logger.info("DATAFRAME CREATED SUCCESSFULLY:") - logger.info(f" Total samples: {len(df)}") - logger.info(f" Unique classes: {len(unique_classes)}") - logger.info(f" Class distribution: {class_counts.to_dict()}") - logger.info( - f" Sample input (first 100 chars): {df['input'].iloc[0][:100]}..." 
- ) - - # Check for class imbalance - min_class_size = class_counts.min() - max_class_size = class_counts.max() - - if min_class_size < 5: - logger.warning( - f"Some classes have very few samples (min: {min_class_size})" - ) - - if max_class_size / min_class_size > 10: - logger.warning( - f"Significant class imbalance detected (ratio: {max_class_size / min_class_size:.1f})" - ) - - return [ - df - ] # Return as list for compatibility with multi-dataframe training - - except Exception as e: - logger.error(f"ERROR CREATING DATAFRAME: {e}") - logger.error(f"Available columns: {list(self.df.columns)}") - logger.error(f"DataFrame shape: {self.df.shape}") - logger.error(f"DataFrame info: {self.df.info()}") - raise - - def get_data_statistics(self): - """Get detailed statistics about the dataset""" - try: - target_column = self.extract_target_column() - input_columns = self.extract_input_columns() - - stats = { - "total_samples": len(self.df), - "input_columns": input_columns, - "target_column": target_column, - "unique_classes": self.df[target_column].unique().tolist(), - "class_distribution": self.df[target_column].value_counts().to_dict(), - "missing_values": self.df.isnull().sum().to_dict(), - "data_types": self.df.dtypes.to_dict(), - } - - # Add text statistics for input columns - for col in input_columns: - if col in self.df.columns: - text_lengths = self.df[col].astype(str).str.len() - stats[f"{col}_text_stats"] = { - "avg_length": text_lengths.mean(), - "min_length": text_lengths.min(), - "max_length": text_lengths.max(), - "median_length": text_lengths.median(), - } - - return stats - - except Exception as e: - logger.error(f"Error getting data statistics: {e}") - return {} - - def validate_data_quality(self): - """Validate data quality and return issues""" - issues = [] - - try: - target_column = self.extract_target_column() - input_columns = self.extract_input_columns() - - # Check for missing target values - missing_targets = self.df[target_column].isnull().sum() - if missing_targets > 0: - issues.append(f"Missing target values: {missing_targets}") - - # Check for missing input values - for col in input_columns: - if col in self.df.columns: - missing_inputs = self.df[col].isnull().sum() - if missing_inputs > 0: - issues.append(f"Missing values in {col}: {missing_inputs}") - - # Check for empty strings in input columns - for col in input_columns: - if col in self.df.columns: - empty_strings = (self.df[col].astype(str).str.strip() == "").sum() - if empty_strings > 0: - issues.append(f"Empty strings in {col}: {empty_strings}") - - # Check class distribution - class_counts = self.df[target_column].value_counts() - min_class_size = class_counts.min() - if min_class_size < 3: - issues.append(f"Classes with very few samples (min: {min_class_size})") - - # Check for duplicate samples - if len(input_columns) == 1: - duplicates = self.df.duplicated( - subset=input_columns + [target_column] - ).sum() - if duplicates > 0: - issues.append(f"Duplicate samples: {duplicates}") - - if issues: - logger.warning(f"Data quality issues found: {issues}") - else: - logger.info("Data quality validation passed") - - return issues - - except Exception as e: - logger.error(f"Error validating data quality: {e}") - return [f"Validation error: {str(e)}"] diff --git a/src/model-training/model_trainer.py b/src/model-training/model_trainer.py deleted file mode 100644 index ec747ce9..00000000 --- a/src/model-training/model_trainer.py +++ /dev/null @@ -1,579 +0,0 @@ -from datapipeline import DataPipeline -from 
trainingpipeline import TrainingPipeline, create_training_pipeline -import os -import sys -import requests -import torch -import pickle -import shutil -import json -from datetime import datetime, timezone -from s3_ferry import S3Ferry -from constants import ( - TEST_DEPLOYMENT_ENDPOINT, - UPDATE_MODEL_TRAINING_STATUS_ENDPOINT, - UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT, - MODEL_RESULTS_PATH, - LOCAL_BASEMODEL_TRAINED_LAYERS_SAVE_PATH, - LOCAL_CLASSIFICATION_LAYER_SAVE_PATH, - LOCAL_LABEL_ENCODER_SAVE_PATH, - S3_FERRY_MODEL_STORAGE_PATH, - MODEL_TRAINING_SUCCESSFUL, - INITIATING_TRAINING_PROGRESS_STATUS, - TRAINING_IN_PROGRESS_PROGRESS_STATUS, - DEPLOYING_MODEL_PROGRESS_STATUS, - MODEL_TRAINED_AND_DEPLOYED_PROGRESS_STATUS, - INITIATING_TRAINING_PROGRESS_MESSAGE, - INITIATING_TRAINING_PROGRESS_PERCENTAGE, - TRAINING_IN_PROGRESS_PROGRESS_PERCENTAGE, - DEPLOYING_MODEL_PROGRESS_PERCENTAGE, - MODEL_TRAINED_AND_DEPLOYED_PROGRESS_PERCENTAGE, - MODEL_TRAINING_FAILED_ERROR, - MODEL_TRAINING_FAILED, - SUPPORTED_BASE_MODELS, - SUPPORTED_OOD_METHODS, - ACCURACY_WEIGHT, - F1_WEIGHT, -) -from loguru import logger - -logger.remove() -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") - - -class ModelTrainer: - def __init__( - self, - cookie, - new_model_id, - old_model_id, - prev_deployment_env, - update_type, - progress_session_id, - model_details, - current_deployment_platform, - ) -> None: - try: - self.new_model_id = int(new_model_id) - self.old_model_id = int(old_model_id) - self.prev_deployment_env = prev_deployment_env - self.cookie = cookie - self.update_type = update_type - - self.cookies_payload = {"customJwtCookie": cookie} - self.progress_session_id = int(progress_session_id) - - logger.info(f"COOKIES PAYLOAD - {self.cookies_payload}") - - if self.update_type == "retrain": - logger.info( - f"ENTERING INTO RETRAIN SEQUENCE FOR MODELID - {self.new_model_id}" - ) - - # Determine if this is a replacement deployment - if self.old_model_id == self.new_model_id: - self.replace_deployment = False - else: - self.replace_deployment = True - - self.model_details = model_details - self.current_deployment_platform = current_deployment_platform - - except Exception as e: - logger.error(f"EXCEPTION IN MODEL_TRAINER INIT : {e}") - self.send_error_progress_session(str(e)) - - @staticmethod - def create_training_folders(folder_paths): - logger.info("CREATING FOLDER PATHS") - try: - for folder_path in folder_paths: - if not os.path.exists(folder_path): - os.makedirs(folder_path) - logger.success(f"SUCCESSFULLY CREATED MODEL FOLDER PATHS : {folder_paths}") - except Exception as e: - logger.error(f"FAILED TO CREATE MODEL FOLDER PATHS : {folder_paths}") - raise RuntimeError(e) - - def update_model_db_training_status( - self, - training_status, - model_s3_location, - last_trained_time_stamp, - training_results, - inference_routes, - ): - training_results_payload = {"trainingResults": {}} - - if len(training_results) == 3: - logger.info( - f"UPDATE TRAINING STATUS DB RESULTS PAYLOAD: {training_results}" - ) - training_results_payload["trainingResults"]["classes"] = training_results[0] - training_results_payload["trainingResults"]["accuracy"] = training_results[ - 1 - ] - training_results_payload["trainingResults"]["f1_score"] = training_results[ - 2 - ] - else: - training_results_payload["trainingResults"]["classes"] = "" - training_results_payload["trainingResults"]["accuracy"] = "0.0" - training_results_payload["trainingResults"]["f1_score"] = "0" - - payload = { - "modelId": 
self.new_model_id, - "trainingStatus": training_status, - "modelS3Location": model_s3_location, - "lastTrainedTimestamp": last_trained_time_stamp, - "trainingResults": training_results_payload, - "inferenceRoutes": {"inference_routes": inference_routes}, - } - - logger.info(f"{training_status} UPLOAD PAYLOAD - \n {payload}") - - response = requests.post( - url=UPDATE_MODEL_TRAINING_STATUS_ENDPOINT, - json=payload, - cookies=self.cookies_payload, - ) - - if response.status_code == 200: - logger.info( - f"REQUEST TO UPDATE MODEL TRAINING STATUS TO {training_status} SUCCESSFUL" - ) - else: - logger.error( - f"REQUEST TO UPDATE MODEL TRAINING STATUS TO {training_status} FAILED" - ) - logger.error(f"ERROR RESPONSE {response.text}") - self.send_error_progress_session(f"Error :{str(response.text)}") - raise RuntimeError(response.text) - - def send_error_progress_session(self, error_msg): - response = self.update_model_training_progress_session( - MODEL_TRAINING_FAILED_ERROR, error_msg, 100, True - ) - current_timestamp = self.get_current_timestamp() - self.update_model_db_training_status( - training_status=MODEL_TRAINING_FAILED, - model_s3_location="", - last_trained_time_stamp=current_timestamp, - training_results=[], - inference_routes=[], - ) - return response - - def update_model_training_progress_session( - self, - training_status, - training_progress_update_message, - training_progress_percentage, - process_complete, - ): - payload = { - "sessionId": self.progress_session_id, - "trainingStatus": training_status, - "trainingMessage": training_progress_update_message, - "progressPercentage": training_progress_percentage, - "processComplete": process_complete, - } - - logger.info( - f"Update training progress session for model id - {self.new_model_id} payload \n {payload}" - ) - - response = requests.post( - url=UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT, - json=payload, - cookies=self.cookies_payload, - ) - - if response.status_code == 200: - logger.info( - f"REQUEST TO UPDATE TRAINING PROGRESS SESSION FOR MODEL ID {self.new_model_id} SUCCESSFUL" - ) - session_id = response.json()["response"]["sessionId"] - else: - logger.error( - f"REQUEST TO UPDATE TRAINING PROGRESS SESSION FOR MODEL ID {self.new_model_id} FAILED" - ) - logger.error(f"ERROR RESPONSE {response.text}") - raise RuntimeError(response.text) - - return session_id - - def deploy_model(self, best_model_info, progress_session_id, dg_id): - payload = { - "modelId": self.new_model_id, - "oldModelId": self.old_model_id, - "replaceDeployment": self.replace_deployment, - "replaceDeploymentPlatform": self.prev_deployment_env, - "bestBaseModel": best_model_info["name"], - "bestModelType": best_model_info["type"], - "progressSessionId": progress_session_id, - "updateType": self.update_type, - "dgId": dg_id, - } - - if self.update_type == "retrain": - payload["replaceDeploymentPlatform"] = self.current_deployment_platform - - logger.info( - f"SENDING MODEL DEPLOYMENT REQUEST FOR MODEL ID - {self.new_model_id}" - ) - logger.info(f"MODEL DEPLOYMENT PAYLOAD - {payload}") - - if self.current_deployment_platform == "testing": - deployment_url = TEST_DEPLOYMENT_ENDPOINT - elif self.current_deployment_platform == "undeployed": - logger.info("DEPLOYMENT ENVIRONMENT IS UNDEPLOYED") - return None - else: - logger.error( - f"UNRECOGNIZED DEPLOYMENT PLATFORM - {self.current_deployment_platform}" - ) - self.send_error_progress_session( - f"UNRECOGNIZED DEPLOYMENT PLATFORM - {str(self.current_deployment_platform)}" - ) - raise RuntimeError( - f"RUNTIME 
ERROR - UNRECOGNIZED DEPLOYMENT PLATFORM - {self.current_deployment_platform}" - ) - - response = requests.post( - url=deployment_url, json=payload, cookies=self.cookies_payload - ) - - if response.status_code == 200: - logger.info(f"REQUEST TO DEPLOY MODEL ID {self.new_model_id} SUCCESSFUL") - else: - logger.error(f"REQUEST TO DEPLOY MODEL ID {self.new_model_id} FAILED") - logger.error(f"ERROR RESPONSE {response.text}") - raise RuntimeError(response.text) - - def get_current_timestamp(self): - current_timestamp = int(datetime.now(timezone.utc).timestamp()) - return current_timestamp - - def calculate_combined_score(self, accuracies, f1_scores): - """Calculate combined score using weighted average""" - if not accuracies or not f1_scores: - return 0.0 - - avg_accuracy = sum(accuracies) / len(accuracies) - avg_f1 = sum(f1_scores) / len(f1_scores) - - combined_score = (ACCURACY_WEIGHT * avg_accuracy) + (F1_WEIGHT * avg_f1) - return combined_score - - def train(self): - """UNIFIED TRAINING METHOD - TRAINS ALL VARIANTS""" - try: - logger.info("ENTERING UNIFIED TRAINING FUNCTION") - logger.info(f"DEPLOYMENT PLATFORM - {self.current_deployment_platform}") - - session_id = self.progress_session_id - logger.info(f"SESSION ID - {session_id}") - - # Update initial progress - self.update_model_training_progress_session( - training_status=INITIATING_TRAINING_PROGRESS_STATUS, - training_progress_update_message=INITIATING_TRAINING_PROGRESS_MESSAGE, - training_progress_percentage=INITIATING_TRAINING_PROGRESS_PERCENTAGE, - process_complete=False, - ) - - # Initialize services - s3_ferry = S3Ferry() - dg_id = self.model_details["response"]["data"][0]["connectedDgId"] - - # Load data - data_pipeline = DataPipeline(dg_id, self.cookie) - dfs = data_pipeline.create_dataframes() - models_inference_metadata, _ = data_pipeline.models_and_filters() - - logger.info(f"MODELS_INFERENCE_METADATA : {models_inference_metadata}") - - # Setup paths - local_basemodel_layers_save_path = ( - LOCAL_BASEMODEL_TRAINED_LAYERS_SAVE_PATH.format( - model_id=self.new_model_id - ) - ) - local_classification_layer_save_path = ( - LOCAL_CLASSIFICATION_LAYER_SAVE_PATH.format(model_id=self.new_model_id) - ) - local_label_encoder_save_path = LOCAL_LABEL_ENCODER_SAVE_PATH.format( - model_id=self.new_model_id - ) - - self.create_training_folders( - [ - local_basemodel_layers_save_path, - local_classification_layer_save_path, - local_label_encoder_save_path, - ] - ) - - # Save inference metadata - with open( - f"{MODEL_RESULTS_PATH}/{self.new_model_id}/models_dets.pkl", "wb" - ) as file: - pickle.dump(models_inference_metadata, file) - - # Generate all model variants to train - model_variants = [] - - # Add standard models - for base_model in SUPPORTED_BASE_MODELS: - model_variants.append( - { - "name": base_model, - "base_model": base_model, - "ood_method": None, - "type": "standard", - } - ) - - # Add OOD variants - for base_model in SUPPORTED_BASE_MODELS: - for ood_method in SUPPORTED_OOD_METHODS: - model_variants.append( - { - "name": f"{base_model}-{ood_method}", - "base_model": base_model, - "ood_method": ood_method, - "type": "ood", - } - ) - - logger.info(f"TRAINING {len(model_variants)} MODEL VARIANTS:") - for variant in model_variants: - logger.info(f" - {variant['name']} ({variant['type']})") - - # Update progress to training phase - self.update_model_training_progress_session( - training_status=TRAINING_IN_PROGRESS_PROGRESS_STATUS, - training_progress_update_message=f"Training {len(model_variants)} model variants (Standard + 
OOD)", - training_progress_percentage=TRAINING_IN_PROGRESS_PROGRESS_PERCENTAGE, - process_complete=False, - ) - - # Train all variants - all_results = [] - - for i, variant in enumerate(model_variants): - logger.info( - f"TRAINING VARIANT {i + 1}/{len(model_variants)}: {variant['name']}" - ) - - try: - # Create training pipeline - if variant["ood_method"]: - training_pipeline = create_training_pipeline( - dfs=dfs, - model_name=variant["base_model"], - ood_method=variant["ood_method"], - ) - else: - training_pipeline = TrainingPipeline(dfs, variant["base_model"]) - - # Train the variant - metrics, models, classifiers, label_encoders, basic_model = ( - training_pipeline.train() - ) - - # Calculate combined score - _, accuracies, f1_scores = metrics - combined_score = self.calculate_combined_score( - accuracies, f1_scores - ) - - # Store results - result = { - "variant": variant, - "metrics": metrics, - "models": models, - "classifiers": classifiers, - "label_encoders": label_encoders, - "basic_model": basic_model, - "combined_score": combined_score, - "avg_accuracy": ( - sum(accuracies) / len(accuracies) if accuracies else 0 - ), - "avg_f1": sum(f1_scores) / len(f1_scores) if f1_scores else 0, - } - - all_results.append(result) - - logger.info( - f"COMPLETED {variant['name']} - Combined Score: {combined_score:.4f}" - ) - logger.info( - f" Avg Accuracy: {result['avg_accuracy']:.4f}, Avg F1: {result['avg_f1']:.4f}" - ) - - except Exception as e: - logger.error(f"FAILED TO TRAIN {variant['name']}: {e}") - continue - - # Select best model across all variants - if not all_results: - raise RuntimeError("No models were successfully trained") - - best_result = max(all_results, key=lambda x: x["combined_score"]) - best_variant = best_result["variant"] - - logger.info(f"BEST MODEL SELECTED: {best_variant['name']}") - logger.info(f"BEST COMBINED SCORE: {best_result['combined_score']:.4f}") - logger.info(f"BEST MODEL TYPE: {best_variant['type']}") - - # Save best model artifacts - for i, (model, classifier, label_encoder) in enumerate( - zip( - best_result["models"], - best_result["classifiers"], - best_result["label_encoders"], - ) - ): - torch.save( - model, - f"{local_basemodel_layers_save_path}/last_two_layers_dfs_{i}.pth", - ) - torch.save( - classifier, - f"{local_classification_layer_save_path}/classifier_{i}.pth", - ) - - label_encoder_path = ( - f"{local_label_encoder_save_path}/label_encoder_{i}.pkl" - ) - with open(label_encoder_path, "wb") as file: - pickle.dump(label_encoder, file) - - # Save basic model - torch.save( - best_result["basic_model"], - f"{MODEL_RESULTS_PATH}/{self.new_model_id}/model_state_dict.pth", - ) - - # Save training summary - training_summary = { - "best_model": best_variant, - "best_score": best_result["combined_score"], - "all_results": [ - { - "variant": r["variant"], - "combined_score": r["combined_score"], - "avg_accuracy": r["avg_accuracy"], - "avg_f1": r["avg_f1"], - } - for r in all_results - ], - "total_variants_trained": len(all_results), - "training_timestamp": self.get_current_timestamp(), - } - - with open( - f"{MODEL_RESULTS_PATH}/{self.new_model_id}/training_summary.json", "w" - ) as f: - json.dump(training_summary, f, indent=2) - - # Create model archive - model_zip_path = f"{MODEL_RESULTS_PATH}/{str(self.new_model_id)}" - shutil.make_archive( - base_name=model_zip_path, root_dir=model_zip_path, format="zip" - ) - - # Upload to S3 - s3_save_location = f"{S3_FERRY_MODEL_STORAGE_PATH}/{str(self.new_model_id)}/{str(self.new_model_id)}.zip" - 
local_source_location = f"{MODEL_RESULTS_PATH.replace('/shared/', '')}/{str(self.new_model_id)}.zip" - - logger.info("INITIATING MODEL UPLOAD TO S3") - _ = s3_ferry.transfer_file( - s3_save_location, "S3", local_source_location, "FS" - ) - - # Cleanup local files - MODEL_RESULT_FOLDER = f"{MODEL_RESULTS_PATH}/{self.new_model_id}" - MODEL_RESULT_ZIP_FILE = f"{MODEL_RESULTS_PATH}/{self.new_model_id}.zip" - - if os.path.exists(MODEL_RESULT_FOLDER): - try: - shutil.rmtree(MODEL_RESULT_FOLDER) - logger.info(f"Cleaned up folder '{MODEL_RESULT_FOLDER}'") - except Exception as e: - logger.warning( - f"Could not delete folder '{MODEL_RESULT_FOLDER}': {e}" - ) - - if os.path.exists(MODEL_RESULT_ZIP_FILE): - try: - os.remove(MODEL_RESULT_ZIP_FILE) - logger.info(f"Cleaned up zip file '{MODEL_RESULT_ZIP_FILE}'") - except Exception as e: - logger.warning( - f"Could not delete zip file '{MODEL_RESULT_ZIP_FILE}': {e}" - ) - - # Update database with best model results - current_timestamp = self.get_current_timestamp() - self.update_model_db_training_status( - training_status=MODEL_TRAINING_SUCCESSFUL, - model_s3_location=s3_save_location, - last_trained_time_stamp=current_timestamp, - training_results=best_result["metrics"], - inference_routes=models_inference_metadata, - ) - - # Update progress to deployment phase - self.update_model_training_progress_session( - training_status=DEPLOYING_MODEL_PROGRESS_STATUS, - training_progress_update_message=f"Deploying best model: {best_variant['name']}", - training_progress_percentage=DEPLOYING_MODEL_PROGRESS_PERCENTAGE, - process_complete=False, - ) - - # Deploy the best model - if self.current_deployment_platform == "undeployed": - logger.info("MODEL DEPLOYMENT PLATFORM IS UNDEPLOYED") - self.update_model_training_progress_session( - training_status=MODEL_TRAINED_AND_DEPLOYED_PROGRESS_STATUS, - training_progress_update_message=f"Best model ({best_variant['name']}) trained successfully - No deployment", - training_progress_percentage=MODEL_TRAINED_AND_DEPLOYED_PROGRESS_PERCENTAGE, - process_complete=True, - ) - logger.info("UNIFIED TRAINING COMPLETED") - else: - logger.info( - f"INITIATING DEPLOYMENT OF {best_variant['name']} TO {self.current_deployment_platform}" - ) - self.deploy_model( - best_model_info=best_variant, - progress_session_id=session_id, - dg_id=dg_id, - ) - - self.update_model_training_progress_session( - training_status=MODEL_TRAINED_AND_DEPLOYED_PROGRESS_STATUS, - training_progress_update_message=f"Best model ({best_variant['name']}) trained and deployed successfully", - training_progress_percentage=MODEL_TRAINED_AND_DEPLOYED_PROGRESS_PERCENTAGE, - process_complete=True, - ) - - logger.info("=" * 60) - logger.info("UNIFIED TRAINING COMPLETED SUCCESSFULLY") - logger.info(f"BEST MODEL: {best_variant['name']}") - logger.info(f"FINAL SCORE: {best_result['combined_score']:.4f}") - logger.info(f"VARIANTS TRAINED: {len(all_results)}") - logger.info("=" * 60) - - except Exception as e: - import traceback - - logger.error(f"EXCEPTION IN UNIFIED MODEL TRAINER: {e}") - logger.error(traceback.format_exc()) - self.send_error_progress_session( - f"UNIFIED TRAINING CRASHED - ERROR - {str(e)}" - ) - raise diff --git a/src/model-training/model_trainer_api.py b/src/model-training/model_trainer_api.py deleted file mode 100644 index 68e5b486..00000000 --- a/src/model-training/model_trainer_api.py +++ /dev/null @@ -1,274 +0,0 @@ -from fastapi.middleware.cors import CORSMiddleware -from fastapi import FastAPI - -from model_trainer import ModelTrainer -import json 
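The FastAPI service that begins below wraps this trainer behind a single POST route. A hedged client-side sketch of the call (host, port, and field values are illustrative only; the field names follow the `SessionPayload` model defined further down):

```python
import json
import requests

payload = {
    "cookie": "<jwt>",  # placeholder, not a real token
    "old_model_id": "122",
    "new_model_id": "123",
    "update_type": "retrain",
    "progress_session_id": 456,
    "deployment_env": "testing",
    # model_details is sent as a JSON string because the endpoint json.loads() it
    "model_details": json.dumps({"response": {"data": [{"connectedDgId": 3}]}}),
}
resp = requests.post("http://localhost:8000/model_trainer/", json=payload, timeout=600)
print(resp.json().get("status"))
```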
-import os -from typing import Optional -from loguru import logger -from pydantic import BaseModel -import requests -import sys - -logger.remove() -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") -print("INIT STARTED model_trainer_api.py") - - -logger.info("INIT STARTED model_trainer_api.py") - -app = FastAPI(title="Model Training API - Unified Training") - -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -print("PROCESS STARTED model_trainer_api.py") -logger.info("PROCESS STARTED model_trainer_api.py") - - -class SessionPayload(BaseModel): - """Unified payload for all training (standard + OOD variants)""" - - cookie: str - old_model_id: str - new_model_id: str - update_type: str - prev_deployment_env: Optional[str] = None - progress_session_id: int - deployment_env: str - model_details: str - - -# Global training status -Training = False - - -@app.post("/model_trainer/") -async def unified_model_train(payload: SessionPayload): - """ - Unified training endpoint that trains all model variants: - - Standard models: estbert, xlm-roberta, multilingual-distilbert - - OOD variants: energy, SNGP, softmax for each base model - - Selects and deploys the best performing model across all variants. - """ - global Training - - try: - print("Starting unified model training") - print("payload: ", payload.dict()) - - # Extract payload data - cookie = payload.cookie - new_model_id = payload.new_model_id - old_model_id = payload.old_model_id - prev_deployment_env = payload.prev_deployment_env - update_type = payload.update_type - progress_session_id = payload.progress_session_id - model_details = json.loads(payload.model_details) - current_deployment_platform = payload.deployment_env - - logger.info(f"UNIFIED TRAINING STARTED FOR MODEL {new_model_id}") - logger.info("TRAINING ALL VARIANTS: Standard + OOD methods") - - Training = True - - # Update initial progress - update_model_training_progress_session( - progress_session_id=progress_session_id, - new_model_id=new_model_id, - training_status="Training In-Progress", - training_progress_update_message="Starting unified training of all model variants (Standard + OOD)", - training_progress_percentage=10, - process_complete=False, - cookie=cookie, - ) - - # Initialize and start unified training - logger.info("INITIALIZING UNIFIED MODEL TRAINER") - - trainer = ModelTrainer( - cookie=cookie, - new_model_id=new_model_id, - old_model_id=old_model_id, - prev_deployment_env=prev_deployment_env, - update_type=update_type, - progress_session_id=progress_session_id, - current_deployment_platform=current_deployment_platform, - model_details=model_details, - ) - - # Train all variants - logger.info("STARTING UNIFIED TRAINING") - trainer.train() - logger.info("UNIFIED TRAINING COMPLETED") - - # Final progress update - update_model_training_progress_session( - progress_session_id=progress_session_id, - new_model_id=new_model_id, - training_status="Training Completed", - training_progress_update_message="Unified training completed - best model selected and deployed", - training_progress_percentage=100, - process_complete=True, - cookie=cookie, - ) - - Training = False - logger.info("UNIFIED TRAINING SCRIPT COMPLETED") - - return { - "status": "success", - "message": "Unified training completed successfully", - "model_id": new_model_id, - "session_id": progress_session_id, - "training_type": "unified_standard_and_ood", - } - - except Exception as e: - 
Training = False - logger.error(f"Error in unified model training: {e}") - print(f"Error in unified model training: {e}") - - # Update error status - try: - update_model_training_progress_session( - progress_session_id=payload.progress_session_id, - new_model_id=payload.new_model_id, - training_status="Training Failed", - training_progress_update_message=f"Unified Training Failed: {str(e)}", - training_progress_percentage=100, - process_complete=True, - cookie=payload.cookie, - ) - except Exception as update_error: - logger.error(f"Failed to update training progress on error: {update_error}") - - return { - "status": "error", - "message": f"Unified training failed: {str(e)}", - "error_type": type(e).__name__, - "training_type": "unified_standard_and_ood", - } - - -@app.get("/model_checker/") -async def model_checker(): - """Check current training status""" - print("Checking training status") - print("Training: ", Training) - return {"Training": Training} - - -@app.get("/supported_models/") -async def get_supported_models(): - """Return all supported model variants""" - base_models = ["estbert", "xlm-roberta", "multilingual-distilbert"] - ood_methods = ["energy", "sngp", "softmax"] - - models = [] - - # Add standard models - for model in base_models: - models.append( - { - "name": model, - "type": "standard", - "description": f"Standard {model} training", - } - ) - - # Add OOD variants - for model in base_models: - for ood_method in ood_methods: - models.append( - { - "name": f"{model}-{ood_method}", - "type": "ood", - "base_model": model, - "ood_method": ood_method, - "description": f"{model} with {ood_method.upper()} OOD detection", - } - ) - - return { - "base_models": base_models, - "ood_methods": ood_methods, - "all_variants": models, - "total_variants": len(models), - } - - -@app.get("/training_status/{session_id}") -async def get_training_status(session_id: int): - """Get detailed training status for a specific session""" - return { - "session_id": session_id, - "is_training": Training, - "status": "in_progress" if Training else "idle", - "message": "Unified training in progress" if Training else "No active training", - "training_type": "unified_standard_and_ood", - } - - -def update_model_training_progress_session( - progress_session_id, - new_model_id, - training_status, - training_progress_update_message, - training_progress_percentage, - process_complete, - cookie, -): - """Update model training progress session""" - payload = { - "sessionId": progress_session_id, - "trainingStatus": training_status, - "trainingMessage": training_progress_update_message, - "progressPercentage": training_progress_percentage, - "processComplete": process_complete, - } - - logger.info( - f"Update training progress session for model id - {new_model_id} payload \n {payload}" - ) - - # Use environment variable for endpoint - update_endpoint = os.getenv( - "UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT", - "http://ruuter-private:8088/classifier/datamodel/progress/update", - ) - - try: - response = requests.post( - url=update_endpoint, - json=payload, - cookies={"customJwtCookie": cookie}, - timeout=10, - ) - - if response.status_code == 200: - logger.info( - f"UPDATE TRAINING PROGRESS SESSION FOR MODEL ID {new_model_id} SUCCESSFUL" - ) - session_id = response.json()["response"]["sessionId"] - return session_id - else: - logger.error( - f"UPDATE TRAINING PROGRESS SESSION FOR MODEL ID {new_model_id} FAILED" - ) - logger.error(f"Response: {response.text}") - raise RuntimeError(response.text) - - except 
requests.exceptions.RequestException as e: - logger.error(f"Request failed for progress update: {e}") - # In case of network issues, just return the session ID - return progress_session_id - except Exception as e: - logger.error(f"Unexpected error in progress update: {e}") - return progress_session_id diff --git a/src/model-training/requirements-cpu.txt b/src/model-training/requirements-cpu.txt deleted file mode 100644 index 405e6b67..00000000 --- a/src/model-training/requirements-cpu.txt +++ /dev/null @@ -1,94 +0,0 @@ -# CPU-only requirements for model training pipeline -# Install with: pip install -r requirements-cpu.txt - -# PyTorch CPU-only version -torch==2.4.0+cpu -torchvision==0.19.0+cpu -torchaudio==2.4.0+cpu ---extra-index-url https://download.pytorch.org/whl/cpu - -# Core ML and data processing -pandas==2.2.2 -scikit-learn==1.5.1 -numpy==1.24.4 -scipy==1.11.3 - -# Transformers and NLP -transformers==4.44.0 -tokenizers==0.19.1 -huggingface-hub==0.24.2 -safetensors==0.4.3 -accelerate==0.33.0 -sentencepiece==0.2.0 - -# For Estonian models and multilingual support -sentence-transformers==2.2.2 -langdetect==1.0.9 - -# Pydantic and validation -pydantic==2.8.2 -pydantic-core==2.20.1 -annotated-types==0.7.0 - -# FastAPI and web framework -fastapi==0.111.1 -fastapi-cli==0.0.4 -uvicorn[standard]==0.30.5 -starlette==0.37.2 -python-multipart==0.0.9 - -# HTTP and networking -requests==2.32.3 -httpx==0.27.0 -httpcore==1.0.5 -httptools==0.6.1 -h11==0.14.0 -urllib3==2.2.2 -certifi==2024.7.4 -charset-normalizer==3.3.2 -idna==3.7 - -# Utilities and logging -loguru==0.7.2 -click==8.1.7 -tqdm==4.66.4 -rich==13.7.1 -colorama==0.4.6 - -# Configuration and environment -python-dotenv==1.0.1 -PyYAML==6.0.1 - -# File handling and serialization -pillow==10.2.0 -regex==2024.7.24 -filelock==3.15.4 -fsspec==2024.6.1 - -# Web server and async -anyio==4.4.0 -sniffio==1.3.1 -websockets==12.0 -watchfiles==0.22.0 -dnspython==2.6.1 -email-validator==2.2.0 - -# Template and markup -Jinja2==3.1.4 -MarkupSafe==2.1.5 -markdown-it-py==3.0.0 -mdurl==0.1.2 -Pygments==2.18.0 - -# Math and scientific computing -sympy==1.12 -mpmath==1.3.0 -networkx==3.2.1 -packaging==24.1 - -# System and CLI -typer==0.12.3 -shellingham==1.5.4 -typing-extensions==4.12.2 -six==1.16.0 -exceptiongroup==1.2.2 diff --git a/src/model-training/requirements-gpu.txt b/src/model-training/requirements-gpu.txt deleted file mode 100644 index 1f291734..00000000 --- a/src/model-training/requirements-gpu.txt +++ /dev/null @@ -1,99 +0,0 @@ -# GPU-enabled requirements for model training pipeline -# Note: Requires NVIDIA GPU with CUDA 12.1+ support - -# PyTorch GPU version with CUDA 12.1 support -torch==2.4.0 -torchvision==0.19.0 -torchaudio==2.4.0 ---extra-index-url https://download.pytorch.org/whl/cu121 - -# Core ML and data processing -pandas==2.2.2 -scikit-learn==1.5.1 -numpy==1.24.4 -scipy==1.11.3 - -# Transformers and NLP (with GPU acceleration) -transformers==4.44.0 -tokenizers==0.19.1 -huggingface-hub==0.24.2 -safetensors==0.4.3 -accelerate==0.33.0 -sentencepiece==0.2.0 - -# For Estonian models and multilingual support -sentence-transformers==2.2.2 -langdetect==1.0.9 - -# GPU-specific acceleration libraries -# CUDA toolkit components (if not system-installed) -nvidia-ml-py3==7.352.0 - -# Pydantic and validation -pydantic==2.8.2 -pydantic-core==2.20.1 -annotated-types==0.7.0 - -# FastAPI and web framework -fastapi==0.111.1 -fastapi-cli==0.0.4 -uvicorn[standard]==0.30.5 -starlette==0.37.2 -python-multipart==0.0.9 - -# HTTP and networking -requests==2.32.3 
-httpx==0.27.0 -httpcore==1.0.5 -httptools==0.6.1 -h11==0.14.0 -urllib3==2.2.2 -certifi==2024.7.4 -charset-normalizer==3.3.2 -idna==3.7 - -# Utilities and logging -loguru==0.7.2 -click==8.1.7 -tqdm==4.66.4 -rich==13.7.1 -colorama==0.4.6 - -# Configuration and environment -python-dotenv==1.0.1 -PyYAML==6.0.1 - -# File handling and serialization -pillow==10.2.0 -regex==2024.7.24 -filelock==3.15.4 -fsspec==2024.6.1 - -# Web server and async -anyio==4.4.0 -sniffio==1.3.1 -websockets==12.0 -watchfiles==0.22.0 -dnspython==2.6.1 -email-validator==2.2.0 - -# Template and markup -Jinja2==3.1.4 -MarkupSafe==2.1.5 -markdown-it-py==3.0.0 -mdurl==0.1.2 -Pygments==2.18.0 - -# Math and scientific computing -sympy==1.12 -mpmath==1.3.0 -networkx==3.2.1 -packaging==24.1 - -# System and CLI -typer==0.12.3 -shellingham==1.5.4 -typing-extensions==4.12.2 -six==1.16.0 -exceptiongroup==1.2.2 - diff --git a/src/model-training/requirements.txt b/src/model-training/requirements.txt deleted file mode 100644 index 660cb46c..00000000 --- a/src/model-training/requirements.txt +++ /dev/null @@ -1,65 +0,0 @@ -pandas==2.2.2 -scikit_learn==1.5.1 -pydantic==2.8.2 -annotated-types==0.7.0 -accelerate==0.33.0 -anyio==4.4.0 -certifi==2024.7.4 -charset-normalizer==3.3.2 -click==8.1.7 -colorama==0.4.6 -dnspython==2.6.1 -email_validator==2.2.0 -exceptiongroup==1.2.2 -fastapi==0.111.1 -fastapi-cli==0.0.4 -filelock==3.15.4 -fsspec==2024.6.1 -h11==0.14.0 -httpcore==1.0.5 -httptools==0.6.1 -httpx==0.27.0 -huggingface-hub==0.24.2 -idna==3.7 -Jinja2==3.1.4 -langdetect==1.0.9 -markdown-it-py==3.0.0 -MarkupSafe==2.1.5 -mdurl==0.1.2 -mpmath==1.3.0 -networkx==3.2.1 -numpy==1.24.4 -packaging==24.1 -pillow==10.2.0 -pydantic==2.8.2 -pydantic_core==2.20.1 -Pygments==2.18.0 -python-dotenv==1.0.1 -python-multipart==0.0.9 -PyYAML==6.0.1 -regex==2024.7.24 -requests==2.32.3 -rich==13.7.1 -safetensors==0.4.3 -sentencepiece==0.2.0 -sentence-transformers==2.2.2 # For multilingual sentence models -shellingham==1.5.4 -six==1.16.0 -sniffio==1.3.1 -starlette==0.37.2 -sympy==1.12 -tokenizers==0.19.1 -torch==2.4.0 -tqdm==4.66.4 -transformers==4.44.0 -typer==0.12.3 -typing_extensions==4.12.2 -urllib3==2.2.2 -uvicorn==0.30.5 -watchfiles==0.22.0 -websockets==12.0 -loguru==0.7.2 -requests==2.32.3 -scipy==1.11.3 -matplotlib==3.7.2 -seaborn==0.12.2 \ No newline at end of file diff --git a/src/model-training/s3_ferry.py b/src/model-training/s3_ferry.py deleted file mode 100644 index a5530c5e..00000000 --- a/src/model-training/s3_ferry.py +++ /dev/null @@ -1,46 +0,0 @@ -import requests -from loguru import logger -from constants import S3_FERRY_ENDPOINT - -import sys - -logger.remove() -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") - - -class S3Ferry: - def __init__(self): - # Updated to use correct Docker service name - self.url = S3_FERRY_ENDPOINT - - def transfer_file( - self, - destination_file_path, - destination_storage_type, - source_file_path, - source_storage_type, - ): - payload = self.get_s3_ferry_payload( - destination_file_path, - destination_storage_type, - source_file_path, - source_storage_type, - ) - - response = requests.post(self.url, json=payload) - return response - - def get_s3_ferry_payload( - self, - destination_file_path: str, - destination_storage_type: str, - source_file_path: str, - source_storage_type: str, - ): - S3_FERRY_PAYLOAD = { - "destinationFilePath": destination_file_path, - "destinationStorageType": destination_storage_type, - "sourceFilePath": source_file_path, - "sourceStorageType": 
source_storage_type, - } - return S3_FERRY_PAYLOAD diff --git a/src/model-training/trainingpipeline.py b/src/model-training/trainingpipeline.py deleted file mode 100644 index ebe5c525..00000000 --- a/src/model-training/trainingpipeline.py +++ /dev/null @@ -1,800 +0,0 @@ -from transformers import ( - XLMRobertaTokenizer, - XLMRobertaForSequenceClassification, - Trainer, - TrainingArguments, - DistilBertTokenizer, - DistilBertForSequenceClassification, - BertForSequenceClassification, - BertTokenizer, -) -from torch.utils.data import Dataset -from sklearn.preprocessing import LabelEncoder -from sklearn.model_selection import train_test_split -from sklearn.metrics import accuracy_score, classification_report -import torch -import torch.nn as nn -import torch.nn.functional as F -import shutil -import pandas as pd -import numpy as np -import sys -from constants import ( - MODEL_CONFIGS, - SUPPORTED_BASE_MODELS, - SUPPORTED_OOD_METHODS, - DEFAULT_OOD_CONFIGS, - DEFAULT_TRAINING_ARGS, - MIN_SAMPLES_PER_CLASS, - TARGET_SAMPLES_FOR_SMALL_DATASETS, - TEST_SIZE_RATIO, -) -from loguru import logger -import os -from transformers import logging as transformers_logging -import warnings - -warnings.filterwarnings( - "ignore", - message="Some weights of the model checkpoint were not used when initializing", -) -transformers_logging.set_verbosity_error() - - -logger.remove() -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") - - -class CustomDataset(Dataset): - def __init__(self, encodings, labels, ood_labels=None): - self.encodings = encodings - self.labels = torch.tensor(labels, dtype=torch.long) - self.ood_labels = ( - torch.tensor(ood_labels, dtype=torch.long) - if ood_labels is not None - else None - ) - - def __getitem__(self, idx): - item = {key: val[idx] for key, val in self.encodings.items()} - item["labels"] = self.labels[idx] - if self.ood_labels is not None: - item["ood_labels"] = self.ood_labels[idx] - return item - - def __len__(self): - return len(self.labels) - - -class SpectralNormalization(nn.Module): - """Spectral normalization layer for improved uncertainty estimation""" - - def __init__(self, layer, n_power_iterations=1, eps=1e-12): - super().__init__() - self.layer = layer - self.n_power_iterations = n_power_iterations - self.eps = eps - - # Initialize spectral norm - with torch.no_grad(): - weight = getattr(layer, "weight") - h, w = weight.size() - u = nn.Parameter(torch.randn(h), requires_grad=False) - v = nn.Parameter(torch.randn(w), requires_grad=False) - self.register_parameter("u", u) - self.register_parameter("v", v) - - def forward(self, x): - if self.training: - self._update_uv() - return self.layer(x) - - def _update_uv(self): - weight = getattr(self.layer, "weight") - with torch.no_grad(): - for _ in range(self.n_power_iterations): - self.v.data = F.normalize( - torch.mv(weight.t(), self.u), dim=0, eps=self.eps - ) - self.u.data = F.normalize(torch.mv(weight, self.v), dim=0, eps=self.eps) - - sigma = torch.dot(self.u, torch.mv(weight, self.v)) - weight.data /= sigma - - -class OODLoss(nn.Module): - """Combined loss for OOD detection""" - - def __init__(self, ood_weight=0.1, energy_margin=10.0, temperature=1.0): - super().__init__() - self.ood_weight = ood_weight - self.energy_margin = energy_margin - self.temperature = temperature - self.ce_loss = nn.CrossEntropyLoss() - - def forward(self, logits, labels, ood_labels=None): - # Standard classification loss - ce_loss = self.ce_loss(logits, labels) - - if ood_labels is None: - return ce_loss - - 
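The energy term used by the OOD loss below doubles as an inference-time OOD score. A self-contained sketch (the threshold is hypothetical and dataset-dependent; it would normally be calibrated on held-out in-distribution data):

```python
import torch

def energy_score(logits, temperature=1.0):
    # E(x) = -T * logsumexp(f(x) / T); higher (less negative) energy suggests OOD
    return -temperature * torch.logsumexp(logits / temperature, dim=-1)

logits = torch.tensor([
    [6.0, 0.5, -1.0],  # peaked logits: confident, in-distribution-looking
    [0.2, 0.1, 0.0],   # flat logits: uncertain, OOD-looking
])
scores = energy_score(logits)
print(scores)          # ~[-6.00, -1.20]
print(scores > -2.0)   # hypothetical threshold -> [False, True]
```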
# Energy-based OOD loss - energy = -torch.logsumexp(logits / self.temperature, dim=-1) - - # Separate ID and OOD samples - is_ood = (ood_labels == 1).float() - is_id = 1.0 - is_ood - - # Energy loss: low energy for ID, high energy for OOD - energy_loss = ( - torch.relu(self.energy_margin - energy) * is_ood # High energy for OOD - + energy * is_id # Low energy for ID - ) - - total_loss = ce_loss + self.ood_weight * energy_loss.mean() - return total_loss - - -class EnhancedModel(nn.Module): - """Enhanced model with optional OOD detection capabilities""" - - def __init__( - self, - base_model, - num_labels, - use_spectral_norm=False, - hidden_dim=768, - dropout_rate=0.1, - ): - super().__init__() - self.base_model = base_model - self.num_labels = num_labels - self.use_spectral_norm = use_spectral_norm - - # Get the hidden size from the base model - if hasattr(base_model.config, "hidden_size"): - self.hidden_size = base_model.config.hidden_size - else: - self.hidden_size = hidden_dim - - # Additional layers for uncertainty estimation - if use_spectral_norm: - self.uncertainty_head = nn.Sequential( - nn.Dropout(dropout_rate), - SpectralNormalization(nn.Linear(self.hidden_size, hidden_dim)), - nn.ReLU(), - nn.Dropout(dropout_rate), - SpectralNormalization(nn.Linear(hidden_dim, num_labels)), - ) - else: - self.uncertainty_head = None - - def forward(self, input_ids, attention_mask=None, labels=None, **kwargs): - # Get outputs from base model - outputs = self.base_model( - input_ids=input_ids, attention_mask=attention_mask, **kwargs - ) - - if self.uncertainty_head is not None: - # Extract hidden representation for uncertainty head - hidden_state = None - - # Try different ways to get the hidden representation - if hasattr(outputs, "pooler_output") and outputs.pooler_output is not None: - hidden_state = outputs.pooler_output - elif hasattr(outputs, "last_hidden_state"): - # Use [CLS] token representation (first token) - hidden_state = outputs.last_hidden_state[:, 0, :] - elif ( - hasattr(outputs, "hidden_states") and outputs.hidden_states is not None - ): - # Use the last hidden state if available - hidden_state = outputs.hidden_states[-1][:, 0, :] - else: - # Fallback: try to find any tensor that looks like hidden states - for attr_name in ["logits", "prediction_logits"]: - if hasattr(outputs, attr_name): - attr_value = getattr(outputs, attr_name) - if ( - isinstance(attr_value, torch.Tensor) - and len(attr_value.shape) >= 2 - ): - # Use the raw logits and add a linear layer to get hidden representation - if attr_value.shape[-1] == self.num_labels: - # This is likely the classification logits, skip uncertainty head - break - else: - raise ValueError( - f"Could not extract hidden state from model outputs: {type(outputs)}" - ) - - if hidden_state is not None: - # Apply uncertainty head - logits = self.uncertainty_head(hidden_state) - # Create a new output object with updated logits - outputs.logits = logits - - return outputs - - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -logger.info(f"TRAINING HARDWARE {device}") - - -class TrainingPipeline: - def __init__(self, dfs, model_name, ood_method=None, ood_config=None): - self.model_name = model_name - self.dfs = dfs - self.ood_method = ood_method - self.ood_config = ood_config or {} - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - # Validate model name - if model_name not in SUPPORTED_BASE_MODELS: - raise ValueError( - f"Unsupported model: {model_name}. 
Supported: {SUPPORTED_BASE_MODELS}" - ) - - # Initialize base model - self.base_model = self._initialize_base_model(model_name) - - def _initialize_base_model(self, model_name): - """Initialize the base model based on model name""" - config = MODEL_CONFIGS[model_name] - - if config["type"] == "bert": - model = BertForSequenceClassification.from_pretrained(config["model_name"]) - self._freeze_and_unfreeze_bert_layers(model) - - elif config["type"] == "roberta": - model = XLMRobertaForSequenceClassification.from_pretrained( - config["model_name"] - ) - self._freeze_and_unfreeze_roberta_layers(model) - - elif config["type"] == "distilbert": - model = DistilBertForSequenceClassification.from_pretrained( - config["model_name"] - ) - self._freeze_and_unfreeze_distilbert_layers(model) - - else: - raise ValueError(f"Unknown model type: {config['type']}") - - return model - - def _freeze_and_unfreeze_bert_layers(self, model): - """Helper method for BERT-based models""" - if hasattr(model, "bert"): - bert_model = model.bert - elif hasattr(model, "base_model"): - bert_model = model.base_model - else: - # Fallback - assume the model itself is the BERT model - bert_model = model - - # Freeze all parameters first - for param in bert_model.parameters(): - param.requires_grad = False - - # Unfreeze last 2 layers - if hasattr(bert_model, "encoder") and hasattr(bert_model.encoder, "layer"): - for param in bert_model.encoder.layer[-2:].parameters(): - param.requires_grad = True - - # Unfreeze classifier if it exists - if hasattr(model, "classifier"): - for param in model.classifier.parameters(): - param.requires_grad = True - - def _freeze_and_unfreeze_roberta_layers(self, model): - """Helper method for RoBERTa-based models""" - if hasattr(model, "roberta"): - roberta_model = model.roberta - elif hasattr(model, "base_model"): - roberta_model = model.base_model - elif hasattr(model, "encoder"): - roberta_model = model - else: - logger.warning(f"RoBERTa model structure: {type(model)}") - for name, module in model.named_children(): - logger.info(f" - {name}: {type(module)}") - if hasattr(module, "encoder"): - roberta_model = module - break - else: - roberta_model = model - - # Freeze all parameters first - for param in roberta_model.parameters(): - param.requires_grad = False - - encoder = None - if hasattr(roberta_model, "encoder"): - encoder = roberta_model.encoder - elif hasattr(roberta_model, "transformer"): - encoder = roberta_model.transformer - - if encoder and hasattr(encoder, "layer"): - for param in encoder.layer[-2:].parameters(): - param.requires_grad = True - else: - logger.warning("Could not find encoder layers to unfreeze") - - # Unfreeze classifier if it exists - if hasattr(model, "classifier"): - for param in model.classifier.parameters(): - param.requires_grad = True - - def _freeze_and_unfreeze_distilbert_layers(self, model): - """Helper method for DistilBERT-based models""" - if hasattr(model, "distilbert"): - distilbert_model = model.distilbert - elif hasattr(model, "base_model"): - distilbert_model = model.base_model - else: - distilbert_model = model - - # Freeze all parameters first - for param in distilbert_model.parameters(): - param.requires_grad = False - - # Unfreeze last 2 layers - if hasattr(distilbert_model, "transformer") and hasattr( - distilbert_model.transformer, "layer" - ): - for param in distilbert_model.transformer.layer[-2:].parameters(): - param.requires_grad = True - - # Unfreeze classifier if it exists - if hasattr(model, "classifier"): - for param in 
model.classifier.parameters(): - param.requires_grad = True - - def preprocess_conversation(self, text): - """Preprocess conversation data""" - text = str(text).strip() - - # Handle common conversation patterns - text = text.replace("\n", " [SEP] ") - text = text.replace("\t", " ") - - # Limit length for transformer models - max_length = 400 - if len(text.split()) > max_length: - words = text.split()[:max_length] - text = " ".join(words) - - return text - - def tokenize_data(self, data, tokenizer): - """Enhanced tokenization for conversation data""" - processed_data = [self.preprocess_conversation(text) for text in data] - - tokenized = tokenizer.batch_encode_plus( - processed_data, - truncation=True, - padding=True, - max_length=512, - return_token_type_ids=False, - return_attention_mask=True, - return_tensors="pt", - ) - return tokenized - - def data_split(self, df): - """Improved data split for flat classification""" - unique_classes = df["target"].unique() - - if len(df) < 100: - # For small datasets, ensure each class has representation - train_samples = [] - test_samples = [] - - for class_name in unique_classes: - class_samples = df[df["target"] == class_name] - - if len(class_samples) == 1: - train_samples.append(class_samples) - else: - train_class, test_class = train_test_split( - class_samples, test_size=TEST_SIZE_RATIO, random_state=42 - ) - train_samples.append(train_class) - test_samples.append(test_class) - - train_df = pd.concat(train_samples) if train_samples else pd.DataFrame() - test_df = pd.concat(test_samples) if test_samples else pd.DataFrame() - - # If test set is empty, duplicate some training samples - if len(test_df) == 0: - test_df = train_df.sample(min(len(train_df), 5), random_state=42) - else: - # For larger datasets, use stratified split - try: - train_df, test_df = train_test_split( - df, - test_size=TEST_SIZE_RATIO, - random_state=42, - stratify=df["target"], - ) - except ValueError: - train_df, test_df = train_test_split( - df, test_size=TEST_SIZE_RATIO, random_state=42 - ) - - return train_df, test_df - - def get_tokenizer_and_model_for_training(self, model_name, num_labels): - """Get appropriate tokenizer and model for training""" - config = MODEL_CONFIGS[model_name] - - if config["type"] == "bert": - model = BertForSequenceClassification.from_pretrained( - config["model_name"], - num_labels=num_labels, - ignore_mismatched_sizes=True, - ) - tokenizer = BertTokenizer.from_pretrained(config["tokenizer_name"]) - self._freeze_and_unfreeze_bert_layers(model) - - elif config["type"] == "roberta": - model = XLMRobertaForSequenceClassification.from_pretrained( - config["model_name"], - num_labels=num_labels, - ignore_mismatched_sizes=True, - ) - tokenizer = XLMRobertaTokenizer.from_pretrained(config["tokenizer_name"]) - self._freeze_and_unfreeze_roberta_layers(model) - - elif config["type"] == "distilbert": - model = DistilBertForSequenceClassification.from_pretrained( - config["model_name"], - num_labels=num_labels, - ignore_mismatched_sizes=True, - ) - tokenizer = DistilBertTokenizer.from_pretrained(config["tokenizer_name"]) - self._freeze_and_unfreeze_distilbert_layers(model) - - else: - raise ValueError(f"Unknown model type: {config['type']}") - - return model, tokenizer - - def replicate_data(self, df, target_size): - """Replicate data to reach target size""" - if len(df) >= target_size: - return df - - multiplier = target_size // len(df) + 1 - replicated_dfs = [df] * multiplier - replicated_df = pd.concat(replicated_dfs, ignore_index=True) - replicated_df 
= replicated_df.sample(n=target_size, random_state=42) - return replicated_df - - def extract_model_components(self, model): - """Extract model components based on model type with robust error handling""" - config = MODEL_CONFIGS[self.model_name] - - try: - if config["type"] == "bert": - # Try different possible BERT model structures - bert_layers = None - if hasattr(model, "bert") and hasattr(model.bert, "encoder"): - bert_layers = model.bert.encoder.layer[-2:].state_dict() - elif hasattr(model, "base_model") and hasattr( - model.base_model, "encoder" - ): - bert_layers = model.base_model.encoder.layer[-2:].state_dict() - else: - logger.warning( - "Could not extract BERT layers, using pattern matching" - ) - bert_layers = { - k: v - for k, v in model.state_dict().items() - if "encoder.layer.10" in k or "encoder.layer.11" in k - } - - classifier_layers = ( - model.classifier.state_dict() - if hasattr(model, "classifier") - else {} - ) - return bert_layers, classifier_layers - - elif config["type"] == "roberta": - # Handle XLM-RoBERTa structure - roberta_layers = None - - # First try to find the main transformer layers - if hasattr(model, "roberta") and hasattr(model.roberta, "encoder"): - roberta_layers = model.roberta.encoder.layer[-2:].state_dict() - elif hasattr(model, "base_model") and hasattr( - model.base_model, "encoder" - ): - roberta_layers = model.base_model.encoder.layer[-2:].state_dict() - else: - # For XLM-RoBERTa, extract by parameter name pattern - logger.warning("Using pattern matching for XLM-RoBERTa layers") - roberta_layers = {} - for name, param in model.named_parameters(): - # Extract last 2 encoder layers (typically layer 10 and 11 for base models) - if "encoder.layer.10." in name or "encoder.layer.11." in name: - # Remove the model prefix to get relative layer name - layer_name = name.split("encoder.layer.")[-1] - roberta_layers[f"encoder.layer.{layer_name}"] = ( - param.data.clone() - ) - - classifier_layers = ( - model.classifier.state_dict() - if hasattr(model, "classifier") - else {} - ) - return roberta_layers, classifier_layers - - elif config["type"] == "distilbert": - # Try different possible DistilBERT model structures - distilbert_layers = None - if hasattr(model, "distilbert") and hasattr( - model.distilbert, "transformer" - ): - distilbert_layers = model.distilbert.transformer.layer[ - -2: - ].state_dict() - elif hasattr(model, "base_model") and hasattr( - model.base_model, "transformer" - ): - distilbert_layers = model.base_model.transformer.layer[ - -2: - ].state_dict() - else: - logger.warning( - "Could not extract DistilBERT layers, using pattern matching" - ) - distilbert_layers = { - k: v - for k, v in model.state_dict().items() - if "transformer.layer.4." in k or "transformer.layer.5." in k - } # DistilBERT has 6 layers - - classifier_layers = ( - model.classifier.state_dict() - if hasattr(model, "classifier") - else {} - ) - return distilbert_layers, classifier_layers - - else: - raise ValueError(f"Unknown model type: {config['type']}") - - except Exception as e: - logger.error(f"Error extracting model components: {e}") - # Fallback: return relevant parts of model state dict - logger.warning("Using fallback: extracting layers by name pattern") - - model_layers = {} - classifier_layers = {} - - for name, param in model.named_parameters(): - if "classifier" in name: - classifier_layers[name] = param.data.clone() - elif "encoder.layer." in name and any( - f"encoder.layer.{i}." 
in name for i in [10, 11, 4, 5] - ): # Last layers for different models - model_layers[name] = param.data.clone() - - return model_layers, classifier_layers - - def train(self): - """Standard training method""" - classes = [] - accuracies = [] - f1_scores = [] - models = [] - classifiers = [] - label_encoders = [] - - method_name = f"{self.model_name}" + ( - f"-{self.ood_method}" if self.ood_method else "" - ) - logger.info(f"INITIATING TRAINING FOR {method_name}") - - for i in range(len(self.dfs)): - logger.info(f"TRAINING FOR DATAFRAME {i + 1} of {len(self.dfs)}") - current_df = self.dfs[i] - - if len(current_df) < MIN_SAMPLES_PER_CLASS: - current_df = self.replicate_data( - current_df, TARGET_SAMPLES_FOR_SMALL_DATASETS - ).reset_index(drop=True) - - train_df, test_df = self.data_split(current_df) - label_encoder = LabelEncoder() - train_labels = label_encoder.fit_transform(train_df["target"]) - test_labels = label_encoder.transform(test_df["target"]) - - # Get model and tokenizer - model, tokenizer = self.get_tokenizer_and_model_for_training( - self.model_name, len(label_encoder.classes_) - ) - - # Apply OOD enhancements if specified - if self.ood_method == "sngp": - logger.info("Applying spectral normalization enhancement") - model = EnhancedModel( - base_model=model, - num_labels=len(label_encoder.classes_), - use_spectral_norm=True, - ) - - train_encodings = self.tokenize_data(train_df["input"].tolist(), tokenizer) - test_encodings = self.tokenize_data(test_df["input"].tolist(), tokenizer) - - # Prepare OOD labels if needed - train_ood_labels = None - test_ood_labels = None - if self.ood_method in ["energy", "sngp"]: - # All samples are ID (in practice, you'd have real OOD data) - train_ood_labels = np.zeros(len(train_labels)) - test_ood_labels = np.zeros(len(test_labels)) - - train_dataset = CustomDataset( - train_encodings, train_labels, train_ood_labels - ) - test_dataset = CustomDataset(test_encodings, test_labels, test_ood_labels) - - # Setup training arguments - training_args = TrainingArguments( - output_dir="tmp", - **DEFAULT_TRAINING_ARGS, - disable_tqdm=False, - ) - - # Use custom trainer for OOD methods - if self.ood_method in ["energy", "sngp"]: - trainer = self._get_ood_trainer( - model, training_args, train_dataset, test_dataset - ) - else: - trainer = Trainer( - model=model, - args=training_args, - train_dataset=train_dataset, - eval_dataset=test_dataset, - compute_metrics=lambda eval_pred: { - "accuracy": accuracy_score( - eval_pred.label_ids, eval_pred.predictions.argmax(axis=1) - ) - }, - ) - - trainer.train() - - # Extract model components - if hasattr(model, "base_model"): - # Enhanced model - base_model = model.base_model - layer_components, classifier_components = self.extract_model_components( - base_model - ) - else: - layer_components, classifier_components = self.extract_model_components( - model - ) - - models.append(layer_components) - classifiers.append(classifier_components) - - # Evaluate model - predictions, labels, _ = trainer.predict(test_dataset) - predictions = predictions.argmax(axis=-1) - report = classification_report( - labels, - predictions, - target_names=label_encoder.classes_, - output_dict=True, - zero_division=0, - ) - - # Log results - logger.info(f"Classification Results for {method_name}:") - for cls in label_encoder.classes_: - if cls in report: - precision = report[cls]["precision"] - f1 = report[cls]["f1-score"] - logger.info( - f" Class '{cls}': Precision={precision:.3f}, F1={f1:.3f}" - ) - - classes.append(cls) - 
accuracies.append(precision) - f1_scores.append(f1) - - label_encoders.append(label_encoder) - - # Clean up - if os.path.exists("tmp"): - shutil.rmtree("tmp") - - basic_model = self.base_model.state_dict() - metrics = (classes, accuracies, f1_scores) - return metrics, models, classifiers, label_encoders, basic_model - - def _get_ood_trainer(self, model, training_args, train_dataset, test_dataset): - """Get custom trainer for OOD methods""" - - class OODTrainer(Trainer): - def __init__(self, ood_method, ood_config, **kwargs): - super().__init__(**kwargs) - self.ood_method = ood_method - self.ood_config = ood_config - - if ood_method == "energy": - config = DEFAULT_OOD_CONFIGS["energy"] - config.update(ood_config) - self.ood_loss = OODLoss( - ood_weight=config.get("energy_weight", 0.1), - energy_margin=config.get("energy_margin", 10.0), - temperature=config.get("energy_temp", 1.0), - ) - - def compute_loss(self, model, inputs, return_outputs=False): - labels = inputs.get("labels") - ood_labels = inputs.get("ood_labels") - - outputs = model( - **{ - k: v - for k, v in inputs.items() - if k not in ["labels", "ood_labels"] - } - ) - logits = outputs.get("logits") - - if ood_labels is not None and self.ood_method == "energy": - loss = self.ood_loss(logits, labels, ood_labels) - else: - loss_fct = nn.CrossEntropyLoss() - loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) - - return (loss, outputs) if return_outputs else loss - - return OODTrainer( - ood_method=self.ood_method, - ood_config=self.ood_config, - model=model, - args=training_args, - train_dataset=train_dataset, - eval_dataset=test_dataset, - compute_metrics=lambda eval_pred: { - "accuracy": accuracy_score( - eval_pred.label_ids, eval_pred.predictions.argmax(axis=1) - ) - }, - ) - - -def create_training_pipeline(dfs, model_name, ood_method=None, **ood_config): - """ - Factory function to create training pipelines. - - Args: - dfs: List of dataframes - model_name: Name of the base model - ood_method: OOD method ("energy", "sngp", "softmax", or None for standard) - **ood_config: Additional OOD configuration parameters - - Returns: - TrainingPipeline: Configured training pipeline - """ - if ood_method and ood_method not in SUPPORTED_OOD_METHODS: - raise ValueError( - f"Unsupported OOD method: {ood_method}. Supported: {SUPPORTED_OOD_METHODS}" - ) - - return TrainingPipeline( - dfs=dfs, model_name=model_name, ood_method=ood_method, ood_config=ood_config - ) diff --git a/src/training/README.md b/src/training/README.md index 536c2960..28b1e9be 100644 --- a/src/training/README.md +++ b/src/training/README.md @@ -1,42 +1,498 @@ -# Agency Classification Model Project +# Training Pipeline Documentation -Training folder contains code and resources for evaluating transformer-based models (BERT, RoBERTa, XLM-RoBERTa) for classifying agency-based conversations. +## Overview -## Project Structure +The Global Classifier Training Pipeline is a comprehensive machine learning pipeline designed to train and evaluate multiple transformer-based text classification models. It supports multi-model training, automatic model selection, MLflow experiment tracking, ONNX export, and S3 deployment. + +## Features + +- **Multi-Model Training**: Train multiple transformer models (BERT, RoBERTa, XLM, etc.) 
simultaneously +- **Automatic Best Model Selection**: Selects the best performing model based on F1 score +- **MLflow Integration**: Complete experiment tracking with metrics, parameters, and artifacts +- **ONNX Export**: Converts best model to ONNX format for optimized inference +- **S3 Integration**: Automatic upload of trained models to S3 storage +- **Early Stopping**: Prevents overfitting with validation-based early stopping +- **Comprehensive Logging**: Detailed logging with rotation and retention policies +- **Docker Support**: Fully containerized training environment +- **Cron Job Integration**: Automated training scheduling through cron manager + +## Architecture ``` -project_root/ -├── data/ -│ ├── raw/ # Raw conversation data -│ ├── processed/ # Processed and split datasets -│ └── splits/ # Train/val/test data splits -├── models/ -│ └── saved/ # Saved model checkpoints +Training Pipeline +├── Data Loading & Preprocessing +├── Multi-Model Training Loop +├── Model Evaluation & Selection +├── Best Model Processing +│ ├── ONNX Export +│ └── S3 Upload +├── Results Aggregation +└── Job Status Updates +``` + +## Directory Structure + +``` +src/training/ ├── scripts/ -│ ├── train.py # Training script for transformer models -│ ├── evaluate.py # Evaluation script for model assessment -│ ├── inference.py # Inference performance benchmarking -│ ├── utils.py # Utility functions used across scripts -│ ├── mlflow_log.py # MLflow logging and experiment tracking -│ └── create_datasets.py # Dataset preparation script -├── experiments/ -│ ├── bert/ # BERT experiment configurations and results -│ ├── roberta/ # RoBERTa experiment configurations and results -│ └── xlm/ # XLM-RoBERTa experiment configurations and results -├── mlruns/ # MLflow tracking directory -├── ANALYSIS.md # Comprehensive analysis of model performance -├── DECISION.md # Final decision on model selection -├── requirements.txt # Project dependencies -└── README.md # This file +│ ├── train.py # Main training script +│ ├── utils.py # Utility functions +│ ├── constants.py # Configuration constants +│ ├── s3_utility_handler.py # S3 operations +│ └── create_datasets.py # Dataset processing +├── logs/ # Training logs +├── dataset_artifacts/ # Processed datasets +└── models/ # Trained model outputs +``` + +## Installation & Setup + +### Prerequisites + +- Python 3.8+ +- CUDA (optional, for GPU training) +- Docker & Docker Compose +- MLflow server +- S3-compatible storage + +### Environment Setup + +1. **Clone the repository**: + ```bash + git clone + cd Global-Classifier + ``` + +2. **Build Docker containers**: + ```bash + docker-compose build + ``` + +3. 
**Start services**: + ```bash + docker-compose up -d + ``` + +## Configuration + +### Model Configuration + +Edit `src/training/scripts/constants.py` to configure supported models: + +```python +MODEL_CONFIG = { + "bert": { + "name": "bert-base-uncased", + "max_length": 512 + }, + "roberta": { + "name": "roberta-base", + "max_length": 512 + }, + "xlm": { + "name": "xlm-roberta-base", + "max_length": 256 + } +} ``` +### Training Parameters + +Key training parameters in `constants.py`: + +```python +# Dataset processing +TEST_SIZE = 0.2 +VALIDATION_SIZE = 0.1 +RANDOM_STATE = 42 + +# Logging configuration +LOG_DIRECTORY = "/app/src/training/logs" +ROTATION_SIZE = "50 MB" +RETENTION_PERIOD = "7 days" +``` + +## Usage + +### Command Line Training + +#### Single Model Training + +```bash +python src/training/scripts/train.py \ + --model_types '["bert"]' \ + --model_id 123 \ + --job_id 456 \ + --dataset_id "3" \ + --data_dir "data/processed" \ + --output_dir "models/output" \ + --num_epochs 3 \ + --batch_size 16 \ + --learning_rate 2e-5 \ + --mlflow_tracking_uri "http://mlflow:5000" +``` + +#### Multi-Model Training + +```bash +python src/training/scripts/train.py \ + --model_types '["bert", "roberta", "xlm"]' \ + --model_id 123 \ + --job_id 456 \ + --dataset_id "3" \ + --data_dir "data/processed" \ + --output_dir "models/output" \ + --num_epochs 3 \ + --batch_size 16 \ + --learning_rate 2e-5 \ + --mlflow_tracking_uri "http://mlflow:5000" +``` + +### Docker Training + +```bash +docker exec -it cron-manager python3 /app/src/training/scripts/train.py \ + --model_types '["bert", "roberta"]' \ + --model_id 123 \ + --job_id 456 \ + --dataset_id "3" \ + --data_dir "/app/data/processed" \ + --output_dir "/app/models" \ + --mlflow_tracking_uri "http://mlflow:5000" +``` + +### Automated Training via Cron + +The pipeline supports automated training through the cron manager: + +```bash +# Trigger training via cron script +./DSL/CronManager/script/train_script_starter.sh +``` + +## Command Line Arguments + +| Argument | Type | Required | Description | +|----------|------|----------|-------------| +| `--model_types` | str | Yes | JSON array of model types to train | +| `--model_id` | int | Yes | Unique identifier for the model | +| `--job_id` | int | Yes | Unique identifier for the training job | +| `--dataset_id` | str | Yes | Dataset identifier | +| `--data_dir` | str | No | Path to processed datasets (default: "data/processed") | +| `--output_dir` | str | Yes | Path to save model outputs | +| `--num_epochs` | int | No | Number of training epochs (default: 3) | +| `--batch_size` | int | No | Training batch size (default: 16) | +| `--learning_rate` | float | No | Learning rate (default: 2e-5) | +| `--weight_decay` | float | No | Weight decay (default: 0.01) | +| `--warmup_ratio` | float | No | Warmup ratio (default: 0.1) | +| `--max_seq_length` | int | No | Maximum sequence length (default: 128) | +| `--seed` | int | No | Random seed (default: 42) | +| `--no_cuda` | flag | No | Disable CUDA usage | +| `--mlflow_tracking_uri` | str | No | MLflow tracking server URI | + +## Training Pipeline Workflow + +### 1. Data Loading & Preprocessing + +```python +# Downloads dataset from S3 if needed +splits, label_mappings = load_data_from_dataset_folder( + dataset_id, data_dir, download_from_s3=True +) +``` + +- Downloads aggregated dataset from S3 +- Processes raw data using `ScalableDatasetProcessor` +- Creates train/validation/test splits +- Validates processed dataset integrity + +### 2. 
Multi-Model Training
+
+```python
+for model_type in model_types:
+    model_result = train_single_model(
+        model_type=model_type,
+        train_texts=train_texts,
+        train_labels=train_labels,
+        # ... other parameters
+    )
+```
+
+For each model type:
+- Initializes tokenizer and model
+- Creates data loaders
+- Sets up optimizer and scheduler
+- Trains with early stopping
+- Evaluates on test set
+- Measures inference time
+
+### 3. Model Selection & Processing
+
+```python
+# Select best model based on F1 score
+best_model = max(results, key=lambda x: x["test_metrics"]["f1"])
+
+# Export to ONNX
+onnx_path = convert_model_to_onnx(best_model_path)
+
+# Upload to S3
+s3_path = s3_service.upload_trained_model(best_model_path, model_id)
+```
+
+### 4. Results Aggregation
+
+```python
+results_summary = create_results_summary(
+    all_model_results, best_overall_model, dataset_id, model_id
+)
+```
+
+## Output Structure
+
+### Training Results
+
+```json
+{
+  "training_summary": {
+    "dataset_id": "3",
+    "model_id": 123,
+    "timestamp": "2024-01-15T10:30:00",
+    "total_models_attempted": 3,
+    "successful_models": 3,
+    "failed_models": 0,
+    "best_overall_model": "roberta",
+    "best_overall_f1": 0.9245
+  },
+  "model_results": {
+    "bert": {
+      "status": "success",
+      "test_metrics": {"f1": 0.9123, "accuracy": 0.9234},
+      "training_time_seconds": 1800,
+      "inference_time_seconds": 0.023
+    }
+  },
+  "model_comparison": {
+    "ranking_by_f1": [
+      {"model_type": "roberta", "f1": 0.9245},
+      {"model_type": "bert", "f1": 0.9123}
+    ]
+  }
+}
+```
+
+### Model Artifacts
+
+```
+models/model_123/
+├── model_roberta/
+│   ├── roberta_epoch_2/          # Best checkpoint
+│   │   ├── pytorch_model.bin
+│   │   ├── config.json
+│   │   ├── tokenizer.json
+│   │   ├── label_mappings.json
+│   │   └── model.onnx
+│   └── training_logs.txt
+├── model_bert/
+│   └── ...
+└── training_summary.json
+```
+
+## MLflow Integration
+
+### Experiment Tracking
+
+Each model training creates a separate MLflow experiment:
+
+```
+Experiment: text_classification_bert_dataset_3
+├── Run: bert_dataset_3_20240115_103000
+│   ├── Parameters: model_type, learning_rate, batch_size, etc.
+│   ├── Metrics: train_loss, val_f1, test_accuracy, etc.
+│   └── Artifacts: model, confusion_matrix, logs
+```
+
+### Logged Metrics
+
+- **Training**: `train_loss` (per epoch)
+- **Validation**: `val_f1`, `val_accuracy`, `val_precision`, `val_recall` (per epoch)
+- **Test**: `test_f1`, `test_accuracy`, `test_precision`, `test_recall`
+- **Performance**: `training_time_seconds`, `avg_inference_time_seconds`
+- **Model**: `num_parameters`
+
+### Accessing MLflow
+
+```bash
+# View experiments in MLflow UI
+http://localhost:5000
+```
+
+## Monitoring & Logging
+
+### Log Levels
+
+- **DEBUG**: Detailed execution information
+- **INFO**: General process information
+- **WARNING**: Non-critical issues
+- **ERROR**: Error conditions
+- **CRITICAL**: Critical failures
+
+### Log Files
+
+```
+src/training/logs/
+├── training_20240115.log    # Daily rotation
+├── training_20240114.log
+└── ...
+```
+
+### Real-time Monitoring
+
+```bash
+# Follow training logs
+docker exec -it cron-manager tail -f /app/src/training/logs/training_$(date +%Y%m%d).log
+```
+
+## Error Handling & Troubleshooting
+
+### Common Issues
+
+#### 1. CUDA Out of Memory
+
+```bash
+# Reduce batch size
+--batch_size 8
+
+# Use CPU training
+--no_cuda
+```
+
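+Before dropping the batch size, it can help to confirm what the trainer actually sees. This is a minimal diagnostic sketch using standard `torch.cuda` calls; it is not part of `train.py`:
+
+```python
+import torch
+
+# Report whether CUDA is visible and how much memory the first GPU offers
+if torch.cuda.is_available():
+    props = torch.cuda.get_device_properties(0)
+    total_gib = props.total_memory / 1024**3
+    allocated_gib = torch.cuda.memory_allocated(0) / 1024**3
+    print(f"GPU: {props.name}, total memory: {total_gib:.1f} GiB")
+    print(f"Currently allocated: {allocated_gib:.2f} GiB")
+else:
+    print("CUDA not available - training falls back to CPU (same as --no_cuda)")
+```
+
+#### 2. 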
Dataset Loading Errors
+
+```bash
+# Check dataset exists in S3
+aws s3 ls s3://your-bucket/datasets/dataset_3/
+
+# Verify processed dataset structure
+ls -la data/processed/dataset_3/
+```
+
+#### 3. MLflow Connection Issues
+
+```bash
+# Check MLflow server status
+curl http://mlflow:5000/health
+
+# Update tracking URI
+--mlflow_tracking_uri "http://localhost:5000"
+```
+
+#### 4. Import/Path Issues
+
+```bash
+# Set PYTHONPATH in container
+export PYTHONPATH="/app:/app/src:/app/src/training:$PYTHONPATH"
+
+# Check imports manually
+python -c "from scripts.constants import MODEL_CONFIG; print('OK')"
+```
+
+### Debug Mode
+
+Enable detailed debugging:
+
+```bash
+# Option 1: in constants.py, set
+#   LOG_LEVEL = "DEBUG"
+
+# Option 2: set the environment variable
+export LOG_LEVEL=DEBUG
+```
+
+### Performance Optimization
+
+#### GPU Optimization
+
+```bash
+# Use mixed precision training
+--fp16
+
+# Optimize batch size for GPU memory
+--batch_size 32  # Adjust based on GPU memory
+```
+
+#### Training Speed
+
+```bash
+# Reduce epochs for quick testing
+--num_epochs 1
+
+# Use smaller max sequence length
+--max_seq_length 64
+
+# Disable inference time measurement
+# Comment out measure_inference_time() calls
+```
+
+## API Integration
+
+### Job Status Updates
+
+The pipeline automatically updates the job status via a REST API:
+
+```python
+# Update status to "training-in-progress"
+update_job_status(job_id=456, status="training-in-progress")
+
+# Update status to "trained" on success
+update_job_status(job_id=456, status="trained")
+
+# Update status to "failed" on error
+update_job_status(job_id=456, status="failed")
+```
+
+### Result Storage
+
+Training results are stored in:
+- Local JSON files
+- MLflow experiments
+- Database (via API endpoints)
+- S3 storage (model artifacts)
+
+### Integration Tests
+
+```bash
+# Test full pipeline with sample data
+# (--model_id, --job_id, and --output_dir are required; values here are examples)
+python src/training/scripts/train.py \
+    --model_types '["bert"]' \
+    --model_id 123 \
+    --job_id 456 \
+    --dataset_id "test_dataset" \
+    --output_dir "models/output" \
+    --num_epochs 1 \
+    --batch_size 2
+```
+
+## Best Practices
+
+### Model Selection
+
+1. **Use appropriate batch sizes**: Start with 16, adjust based on GPU memory
+2. **Monitor validation metrics**: Use early stopping to prevent overfitting
+3. **Compare multiple models**: Train BERT, RoBERTa, and XLM for best results
+4. **Validate on diverse data**: Ensure the test set represents the real-world distribution
+
+### Resource Management
-## Models
+1. **GPU Memory**: Monitor with `nvidia-smi`, reduce batch size if needed
+2. **Disk Space**: Clean up old model checkpoints regularly
+3. **Logging**: Configure appropriate log retention policies
+4. **MLflow Storage**: Archive old experiments periodically
-We evaluate the following transformer-based models:
+### Production Deployment
-- **BERT** (`bert-base-uncased`): A bidirectional transformer pre-trained on English text.
-- **RoBERTa** (`roberta-base`): An optimized version of BERT with improved training methodology.
-- **XLM-RoBERTa** (`xlm-roberta-base`): A multilingual version of RoBERTa trained on 100 languages.
+1. **Model Validation**: Always validate model performance before deployment
+2. **ONNX Export**: Use ONNX models for optimized inference
+3. **A/B Testing**: Compare new models with existing production models
+4. 
**Monitoring**: Set up alerts for model performance degradation +### Testing +- Write unit tests for new functions +- Add integration tests for pipeline changes +- Test with different model types and datasets +- Validate performance benchmarks \ No newline at end of file diff --git a/src/training/dataset_artifacts/3.json b/src/training/dataset_artifacts/3.json deleted file mode 100644 index a52abcfb..00000000 --- a/src/training/dataset_artifacts/3.json +++ /dev/null @@ -1,2230 +0,0 @@ -{ - "aggregated_data": [ - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "1", - "question": "Kas Politsei- ja Piirivalveamet teab midagi selle kohtuotsuse kohta, mis on seotud korruptsiooniga ja antud Tallinna Ringkonnakohus 13. oktoobril 2020?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "2", - "question": "Kas ma saan teada, kust saab rohkem infot BRIIS-i süsteemi kasutuselevõtu kohta Eestis?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "3", - "question": "Ma unustasin oma mobiil-ID PIN1 koodi, kuidas ma saan selle taastada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "4", - "question": "Mul on küsimus, kuidas ma saan broneeringu piirijärjekorda?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "5", - "question": "Mul on Islandi kodanikuna tähtajaline elamisõigus Eestis. Kas ma pean selle pikendamiseks midagi tegema, või pikeneb see automaatselt?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "6", - "question": "Mul on vaja Mobiil-ID taotleda, aga kust ma alustan? Kas ma pean selleks midagi eriti keerulist tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "7", - "question": "Meil on e-teenus, kus tahaks mobiil-ID abil digiallkirju võimaldada. Kust alustada ja milliseid dokumente meil vaja läheb?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "8", - "question": "Mul on 16-aastane poeg, kelle elamisluba saab pea kehtima. Kuidas ma saan tema elamisluba pikendada, et ta saaks jätkuvalt minu juures elada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "9", - "question": "Mul on digiallkiri, aga ei tea, kuidas sellega dokument arvutis allkirjastada, kas sa saad mind juhendada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "10", - "question": "Kust ma saan teada, kas mu Mobiil-ID sertifikaat on veel kehtiv?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "11", - "question": "Kas ma saan nimekirjast leitud kohtulahendite täpsemaid andmeid saada, näiteks kohtuotsuse põhjendust?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "12", - "question": "Ma olen Euroopa Liidu kodanik ja tahan Eestis elama asuda. Kas ma pean kohe ID-kaarti taotlema või on midagi muud, mida ma peaksin kõigepealt tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "13", - "question": "Ma ei saa aru, kas MyID portaali teave on usaldusväärne, kui ma peaksin seda kasutama näiteks pangatehingutega seotud asjade jaoks? On seal mingi garantiit?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "14", - "question": "Meie ettevõte tahab kasutada DigiDoc4 rakendust digiallkirjastamiseks. Kuidas me saame hankida ajatempliteenuse pakkujalt teenuse, kui me ei ole valitsuse asutus?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "15", - "question": "Mul on ID-kaart lukustatud, kuidas ma saan PIN-koodid uuesti aktiveerida?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "16", - "question": "Mul on Islandi kodakondsus. Kas ma saan elamisõiguse Eestis tähtajalise elamisõiguse alusel ja mida ma pean selleks tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "17", - "question": "Ma saan aru, et piiriületuseks on võimalik broneering teha? Kuidas ma selle teen?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "18", - "question": "Kas ma saan ID-kaart või digi-ID abil dokumente elektrooniliselt allkirjastada ja kas see on juriidiliselt kehtiv?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "19", - "question": "Kas ma saan vanema digidoki faili, mis on BDOC formaadis, avada uuemas versioonis, mis kasutab ASICE formaati?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "20", - "question": "Mul tuleb DigiDoc4 käivitamisel viga, mis ütleb, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus. Mida ma pean tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "21", - "question": "Ma tahan elamisluba pikendada, aga kuidas ma peaksin selle taotluse vormistama? Kas saan abi saada selle ankeedi täitmisel?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "22", - "question": "Mul on ettevõtlusega seotud elamisluba ja see läheb pea lõppema. Kuidas ma saan seda pikendada ja millised dokumendid mul selleks vaja on?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "23", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik. Kas ma saan Rahvusvahelise Migratsiooniorganisatsiooni (IOM) kaudu abi tagasi pöördumiseks ja kuidas ma selleks saan?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "24", - "question": "Mul on ID-kaart, aga rakendus ei tunne seda. Mida ma saan teha?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "25", - "question": "Mul on küsimus, kuidas ma saan piiriületuse aja broneerida?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "26", - "question": "Mul on ID-kaart, aga rakendus ei suuda seda lugeda. Mis ma saan teha?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "27", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja aktiveerida? Kas see on võimalik ka Elisa klientidele?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "28", - "question": "Mul on 16-aastane laps, kes elab koos minuga. Kuidas ma saan tema elamisluba pikendada, et ta saaks siin elada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "29", - "question": "Kas Politsei- ja Piirivalveamet teab midagi selle kohtuotsuse kohta, mille number on 1-20-4858 ja mis on Tallinna Ringkonnakohus 13. oktoobrist 2020?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "30", - "question": "Mul on 16-aastane laps ja tema elamisluba saab pea kehtima. Kuidas ma saan tema elamisloa pikendada, et ta saaks mu juures elada?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "31", - "question": "Kas meil on võimalik Riigi allkirjastamisteenusega liituda, kui oleme valitsuse asutus ja tahame digiallkirjastamise võimaldada enda e-teenuses?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "32", - "question": "Meil on ettevõte ja tahame oma veebikeskkonnas võimaldada klientidele dokumentide digiallkirjastamist. Kas me saame kasutada Riigi allkirjastamisteenust selleks ja kuidas sellega liituda?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "33", - "question": "Mul on elamisluba ettevõtluseks suurinvestorina, kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "34", - "question": "Mul on tähtajaline elamisluba ja see lõppeb pea pea. Kuidas ma saan seda pikendada ja milliseid pabereid mul vaja on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "35", - "question": "Kas ma saan digiallkirja anda Mobiil-ID abil ja mis on selleks vaja?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "36", - "question": "Mul on küsimus, kuidas ma saan oma laste nutiseadmete turvalisust parandada, kui nad kasutavad paroolina näiteks sünnipäeva?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "37", - "question": "Kuidas ma saan kontrollida, millised riigi e-teenustes olen hetkel sisse logitud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "38", - "question": "Mul on Mobiil-ID sertifikaat, aga ei tea, kuidas saan kontrollida, kas see on veel kehtiv? Kas saan seda teha ID.ee portaalis?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "39", - "question": "Kas Politsei- ja Piirivalveamet teab midagi kohtuotsusest, mis puudutab kriminaalasja 1-20-4858 ja on leitav riigiteataja.ee-st?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "40", - "question": "Mul on ettevõtluseks elamisluba. Millised dokumendid mul vaja on, et seda pikendada ja kas ma saan selle e-posti teel teha, kui olen viimased 6 aastat sõrmejäljed andnud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "41", - "question": "Mul on 16-aastane laps, kes elab minu juures elamisloaga. Kust ma saan teada, kuidas tema elamisluba pikendada ja millised paberid selleks vaja on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "42", - "question": "Mul on küsimus Mobiil-ID taotlemise kohta. Kust ma saan teada, kas minu mobiilioperaator pakub seda teenust ja mida ma pean selleks tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "43", - "question": "Kuidas ma saan kontrollida, millised riigi e-teenustes on mul aktiivsed sessioonid ja kuidas need lõpetada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "44", - "question": "Mul kadus meresõidutunnistus, mida ma pean tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "45", - "question": "Ma näen, et paljudele on algatatud elamisloa kehtetuks tunnistamise menetlus. Kuidas ma saan teada, kas minult on menetlus algatatud ja mida ma pean tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "46", - "question": "Mul on 16-aastane laps ja tema elamisluba saab pea lõppema. 
Kuidas ma saan tema elamisloa pikendada, et ta saaks minu juures elada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "47", - "question": "Kas ma saan vanema DigiDoc BDOC faili veel avada ja digitaalselt alla kirjutada, või peab see olema ASICE formaadis?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "48", - "question": "Mul on ID-kaart lukus, kuidas ma saan uued PIN- ja PUK-koodid tellida?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "49", - "question": "Mul kadus meremehe teenistusraamat, mida ma pean tegema? Kas ma pean selle kohta mingit avaldust politseile andma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "50", - "question": "Ma elan Berliinis ja tahaksin tellida passi. Kas ma saan selle taotluse saata Berliini esindusse?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "51", - "question": "Ma elan Austraalias ja vajan uut passe. Kas saan selle taotleda Canberras asuvas Eesti välisesinduses?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "52", - "question": "Kas ma saan teada, kas mul on veel kehtiv ajutine reisidokument, mis anti välja enne 18. detsembril 2020?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "53", - "question": "Ma kaotasin oma meresõidutunnistuse. Mida ma pean tegema, et see kehtetuks tunnistada ja uus taotlema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "54", - "question": "Mul on ID-kaart lukus, ma ei saa PIN-koodi sisestada. Kuidas ma saan PUK-koodi abil selle avada ja mida ma pean tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "55", - "question": "Mul on probleem DigiDoc4-ga – see annab teada, et sertifikaatide usaldusnimekirja uuendamine ei õnnestu. Mida ma saan selleks teha?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "56", - "question": "Meil on ettevõte ja tahame kasutada DigiDoc4 rakendust digiallkirjastamiseks. Kuidas me saame hankida vajalikud ajatemplid ja mobiil-ID teenused ärieesmärkidel, kui RIA vahendusteenus on mõeldud ainult tavakasutajatele?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "57", - "question": "Kuidas ma saan kontrollida, millised e-teenused on mul hetkel aktiivsed sessioonid lahti?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "58", - "question": "Kas Politsei- ja Piirivalveamet teab midagi selle kohtuotsuse kohta 1-20-4858, mis on seotud korruptsiooniga?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "59", - "question": "Mul on ID-kaart, aga saan vea, kui proovin rakendusega digiallkirja anda. Mida ma saan teha?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "60", - "question": "Kas MyID portaal on usaldusväärne ametlik teabeallikas või on see pigem eksperimentaalne, nagu seal öeldakse?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "61", - "question": "Kust ma saan teada, kas mu Mobiil-ID sertifikaat on veel kehtiv?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "62", - "question": "Ma olen huvitatud Mobiil-ID taotlemisest, aga ma ei saa aru, kust ma selle kohta täpsemat infot leian? Kas ID.ee lehel on selle kohta selged juhendid?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "63", - "question": "Mul on probleem Mobiil-ID allkirjastamisega - DigiDoc4's kuvatakse veateadet 'SSL ühenduskanali loomine ebaõnnestus'. Kuidas ma saan selle ise lahendada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "64", - "question": "Kas MyID portaali teave on alati täpne, või on seal võimalik vigu, kuna see on beetaversioon?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "65", - "question": "Mul on SIM-kaart alles, aga kus ma leian need PIN-koodid, mis SIM-kaardi ümbrisel on? Ma ei tea, kust neid hakata otsima." - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "66", - "question": "Mul on küsimus Mobiil-ID taotlemise kohta. Kas ma saan selle tellida ka Elisa kaudu ja mis on selleks täpne protseduur?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "67", - "question": "Mul on SIM-kaart alles, aga ma ei leia neid PIN-koode, mis SIM-kaardi ümbrise alt kraabida tuli. Kus ma peaksin neid otsima?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "68", - "question": "Mul on Mobiil-ID sertifikaat, aga saan aru, et see kehtib 5 aastat. Kust ma saan teada, kas mu sertifikaat on veel kehtiv?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "69", - "question": "Ma kaotasin oma Mobiil-ID SIM-kaardi. Kuidas saan uued PIN-koodid saada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "70", - "question": "Mul on Mobiil-ID, aga ei saa riigiteenustesse sisse. Mida ma saan teha?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "71", - "question": "Mul on ettevõtlusega seotud elamisluba. Kuidas ma saan selle pikendamise taotlusprotsessi algatada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "72", - "question": "Ma elan Moskvas ja tahaksin tellida passi. Kas ma saan selle taotluse saata Moskva saatkonda?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "73", - "question": "Ma näen siin mitmeid inimesi, kelle elamisloa kehtetuse menetlus on algatatud. Kuidas ma saan teada, mis on selle menetluse täpne tähendus ja mis etappe see sisaldab?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "74", - "question": "Kuidas ma saan ID-tarkvara installitud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "75", - "question": "Mul kadus meremehe teenistusraamat. Mida ma teen, et saada uus ja vana tühistada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "76", - "question": "Mul on ettevõtlusega seotud elamisluba. Kust ma saan teada, milliseid dokumente ma pean selle pikendamiseks esitama?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "77", - "question": "Milliseid veebibrausereid ma saan kasutada ID-kaardi kasutamiseks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "78", - "question": "Mul on elamisluba õpinguteks, aga see saab pea lõppema. Kas ma saan Eestis veel töötada, kui ootan selle pikendamisel? Kui palju aega mul on selleks?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "79", - "question": "Ma pean avama vanema digiallkirjastatud dokumenti, mis on BDOC 1.0 formaadis. Kas ID.ee toetab seda endiselt ja kuidas ma saan selle avada?" 
- }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "80", - "question": "Kas Politsei ja Piirivalveamet kasutab BRIIS süsteemi, ja milleks see täpselt mõeldud on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "81", - "question": "Ma soovin digitaalselt dokumente allkirjastada, aga ei tea, kuidas see õigesti käib ja milliseid vahendeid ma selleks kasutada saan?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "82", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja aktiveerida?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "83", - "question": "Mul tuli DigiDoc4 käivitamisel viga, kus öeldakse, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus. Mida ma selle vastu teha saan?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "84", - "question": "Mul on ettevõtlusel põhinev elamisluba ja see hakkab pea lõppema. Kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "85", - "question": "Kust ma saan teada, kuidas riigilõivu tasuma peab, kui ma tahan ID-kaarti välisesinduses taotlema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "86", - "question": "Kuidas ma saan kontrollida, millised riigi e-teenustesse sisselogimised mul aktiivselt on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "87", - "question": "Meil on ettevõte ja tahame kasutada DigiDoc4 rakendust digiallkirjastamiseks. Kuidas me saame mobiil-ID teenuse jaoks lepingu sõlmida SK ID Solutions AS-iga?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "88", - "question": "Meie ettevõte tahab hakata kasutama DigiDoc4 rakendust digiallkirjastamiseks. Kuidas me saame mobiil-ID teenuse kasutamiseks lepingu SK ID Solutions AS-iga sõlmida?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "89", - "question": "Kust ma saan tellida passi, kui elan Berliinis?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "90", - "question": "Ma elan Moskvas ja tahaksin tellida passi. Kas ma saan selle taotluse saata Moskva Eesti välisesindusse?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "91", - "question": "Ma kaotasin oma meremehe teenistusraamatu, mida ma pean tegema? Kas ma saan selle uue taotleda digitaalselt?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "92", - "question": "Mul on probleem Mobiil-IDga dokumente allkirjastamisel, DigiDoc4 annab veateadet „SSL ühenduskanali loomine ebaõnnestus“. Kuidas ma saan selle ise lahendada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "93", - "question": "Mul on õppimisviisaga Eestis, aga see lõppeb pea pea. Kas ma saan pärast seda veel 270 päeva Eestis olla ja kas ma saan sel ajal töötada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "94", - "question": "Kas ma saan vanema digidokumendi (.bdoc) teisendada .asice formaadiks?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "95", - "question": "Kuidas ma saan oma veebirakendusse integreerida digiallkirja tugi mobiil-ID abil?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "96", - "question": "Mul on õppimisviisuma aeg lõppemas, kas ma saan Eestis veel mingi aja töötada pärast selle kehtivuse lõppu?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "97", - "question": "Mul on ID-kaart, aga ei saa rakenduses PIN-koodi sisestada. Mis võib valesti olla?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "98", - "question": "Mul on Islandi kodakondsus. Kas ma saan tähtajalise elamisõiguse Eestis ja kuidas ma selle saan?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "99", - "question": "Mul on probleem Mobiil-ID-ga dokumentide allkirjastamisel - DigiDoc4 näitab veateadet 'SSL ühenduskanali loomine ebaõnnestus'. Kuidas ma saan selle ise lahendada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "100", - "question": "Ma kaotasin oma PIN-koodi. Kuidas ma saan selle taastada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "101", - "question": "Ma broneerin piiriületuse, aga kuidas ma saan teada, kas ma vajan eelisjärjekorda?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "102", - "question": "Meil on ettevõte ja tahame oma veebiteenuses digiallkirju kasutada. Kuidas me saame sellega alustada ja milliseid võimalusi meil on ID-kaardi, mobiil-ID ja Smart-ID kasutamisega?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "103", - "question": "Kuni millal neid 2014-2021 ajutiste reisidokumentide näidiseid veel kasutatakse?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "104", - "question": "Kas ma saan vanemaid BDOC formaate veel kasutada, või pean üle minema ASICEle?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "105", - "question": "Ma olen Venemaalt tulemas ja tahaksin teada, kuidas ma saan endale eelisjärjekorra broneeringu teha?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "106", - "question": "Mul on probleem Mobiil-ID-ga dokumentide allkirjastamisel - DigiDoc4 näitab mingit SSL ühenduse viga. Kuidas ma selle saan lahendatud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "107", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja mis on selleks vajalikud sammud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "108", - "question": "Kust ma saan tellida passi, kui elan Austraalias?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "109", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik, kas ta saaks Rahvusvahelise Migratsiooniorganisatsiooni (IOM) kaudu abi tagasi pöördumiseks?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "110", - "question": "Meil on ettevõte ja tahame kasutada DigiDoc4 rakendust dokumentide digiallkirjastamiseks. Kui palju tasuta allkirju meile lubatud kuus?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "111", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja aktiveerida? Mis selleks vaja?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "112", - "question": "Mul on probleem Mobiil-ID-ga dokumente allkirjastamisel - DigiDoc4 annab veateadet \"SSL ühenduskanali loomine ebaõnnestus\". Kuidas ma saan selle ise lahendada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "113", - "question": "Mul kadus meresõidutunnistus, mida ma pean tegema? Kas ma saan selle digitaalselt avalduse kaudu taotleda?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "114", - "question": "Kas ma saan vanema digidokumendi (.bdoc formaadis) veel avada ja kinnitada, või pean selle kohe .asice formaadiks konverteerima?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "115", - "question": "Kas ma saan nende kohtulahendite täiselektronsed koopiad kusagilt alla laadida, mis on loetletud selles dokumendis?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "116", - "question": "Ma näen, et mitmed inimesed on saanud algatatud elamisloa kehtetuks tunnistamise menetlus. Mille põhjal otsustatakse, et menetlus algatatakse?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "117", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik. Kas ma saan Rahvusvahelise Migratsiooniorganisatsiooni (IOM) kaudu abi kodumaale tagasi pöördumiseks?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "118", - "question": "Mul on ID-kaart, aga saan veateadet rakenduses. Kuidas ma saan probleemi lahendada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "119", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja mis on selleks vajalikud sammud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "120", - "question": "Ma kaotasin oma meremehe teenistusraamatu, mida ma pean tegema? Kas ma pean selle kohta avaldust esitama?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "121", - "question": "Mul on Mobiil-ID SIM kaart, aga ma ei leia PIN-koode. Kus ma need leian?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "122", - "question": "Meil on ettevõte ja tahame oma veebiteenuses mobiil-ID abil digiallkirju kasutada. Kust alustama peaksime ja kas teil on selle kohta mingi õpetus või soovitusi?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "123", - "question": "Mul on äriühingu osanikuna elamisluba ettevõtluseks. Kas ma saan selle pikendada e-posti teel, kui olen viimased 5 aastat digitaalselt pingerjaid andnud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "124", - "question": "Mis tähendab see, et Margit Tsekhanovale algatati elamisloa kehtetuks tunnistamise menetlus?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "125", - "question": "Tere! Ma tahan ID-kaarti välisesinduses taotlema minna, aga kuidas ma riigilõivu tasuma saan? Kas ma saan selle kohta rohkem infot?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "126", - "question": "Mul on suurinvestori elamisluba, kuidas ma saan selle pikendada ja millised dokumendid mulle selleks vaja on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "127", - "question": "Mul on ID-kaart lukustatud, kuidas ma saan PIN-koodid uuesti kätte?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "128", - "question": "Vihkan näitlikumalt, mis tähendab, et Sofiya Fridmanel algatati menetlus Välismaalaste seaduse § 241 lg 2 p 2 alusel?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "129", - "question": "Mul on ID-kaart lukustunud, kuidas ma saan PIN-koodi avada ja mida ma pean selleks tegema?" 
- }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "130", - "question": "Ma olen EL kodanik ja kolisin Eestisse. Kuidas ma saan tähtajalist elamisõigust taotleda ja mida ma selleks pean tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "131", - "question": "Kas ma saan vanemate digiallkirjastatud dokumentide avamiseks kasutada DigiDoc4 rakendust, kui need on loodud enne BDOC 2.1 versiooni?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "132", - "question": "Mul on küsimus MyID portaali kohta - kas see on ikka usaldusväärne, kui see beetaversioon on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "133", - "question": "Mul on ID-kaart, aga ma ei mäleta PUK-koodi. Kuidas ma saan uue koodiümbriku tellida?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "134", - "question": "Millal lõpetati nende ajutiste reisidokumentide (kujundusega, mis kehtis 2014-2021) väljastamine?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "135", - "question": "Ma elan Moskvas ja tahan tellida passi. Kas ma saan selle taotluse saata Moskva välisesindusse?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "136", - "question": "Kuni millal neid 2014-2020 ajutiste reisidokumentide näidiseid saab kasutada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "137", - "question": "Ma ei ole kindel, kas MyID portaal on piisavalt täpne ametlikuks kasutamiseks. Kust ma saan kindlaks teha, kas ma saan sellel teenusel põhinevat infot kasutada näiteks deklaratsiooni täitmiseks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "138", - "question": "Ma kaotasin oma meresõidutunnistuse. Kuidas ma saan selle kehtetuks tunnistada ja uue taotlema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "139", - "question": "Kuidas ma saan kontrollida, millised e-teenused on hetkel aktiivsed ja kuidas need seansid lõpetada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "140", - "question": "Kas ma saan teada, kuni millal neid 2014-2020 ajutisi reisidokumente veel kehtivad?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "141", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja mis mulle selleks vaja on?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "142", - "question": "Tere! Ma tahan ID-kaarti välisesinduses taotlema minna. Kust ma saan teada, kuidas riigilõivu seal tasuda saan?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "143", - "question": "Mul on probleem DigiDoc4-ga – see annab teate, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus. Mida ma peaksin tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "144", - "question": "Mul on küsimus Mobiil-ID kasutamise kohta. Kas ma saan sellega ka dokumente arvutis allkirjastada ja kui jah, siis millist tarkvara ma selleks vajan?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "145", - "question": "Mul on küsimus, kuidas ma saan broneeringu muuta, kui ma saan hiljaks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "146", - "question": "Kelle tähtajalised elamisi tühistati 29. novembril avaldatud teates?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "147", - "question": "Mul on küsimus Mobiil-ID taotlemise kohta. Kas ma saan selle teenuse tellida Telia kaudu ja mida selleks vaja on?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "148", - "question": "Mul on õppimisviisaga Eestis, ja see on lõppemas. Kas ma saan jääda Eestisse veel 270 päeva, ja kas ma tohin sel ajal ka töötada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "149", - "question": "Tere! Ma tahaksin ID-kaarti välisesinduses taotlema minna, aga kuidas ma riigilõivu tasuma pean? Kus ma selle kohta infot saan?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "150", - "question": "Mul kadus meresõidutunnistus, mida ma pean tegema? Kas ma saan selle digitaalselt avalduse kaudu taotleda?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "151", - "question": "Mis tähendab, et Margit Tsekhanova elamisloa menetlus on algatatud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "152", - "question": "Tere! Ma tahan ID-kaarti välisesinduses taotlema minna, aga kuidas ma riigilõivu saan tasuda? Kas ma pean selle kohta kusagilt info leidma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "153", - "question": "Mul on ettevõtlusega seotud elamisluba. Kuidas ma saan seda pikendada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "154", - "question": "Mul on küsimus Mobiil-ID kasutamise kohta. Kuidas ma PIN2-koodi saan sisestada, kui e-teenus küsib seda?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "155", - "question": "Mul on tähtajaline elamisluba ja tahaksin seda pikendada. Kas ma saan selle pikendamise taotluse esitada e-posti teel, kui olen andnud sõrmejäljed viimase kuue aasta jooksul?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "156", - "question": "Ma olen Euroopa Liidu kodanik ja kolisin Eestisse. Kuidas ma saan end ametlikult registreerida ja saada tähtajaline elamisõigus?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "157", - "question": "Mul on suurinvestori elamisluba ja see läheneb lõppu. Kas ma saan selle pikendamist e-posti teel taotleda?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "158", - "question": "Mul on probleem DigiDoc'iga, kui saan vea, et kontrollkood ei kattu? Mis ma peaksin tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "159", - "question": "Ma näen, et mitmel inimesel on elamisluba kehtetuks tunnistamise menetlus käivitunud. Kuidas ma saan teada, miks minu elamisluba võib kehtetuks tunnistatud saada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "160", - "question": "Mul on äriühingu osanikuna ettevõtluseks elamisluba, kuidas ma saan selle pikendada ja milliseid pabereid mul vaja on?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "161", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik. Kuidas ma saan Rahvusvahelise Migratsiooniorganisatsiooni (IOM) programmi kaudu tagasi koju pöörduda ja mida see programm täpselt kätkeb?" 
- }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "162", - "question": "Ma kaotasin oma meresõidutunnistuse. Mida ma pean tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "163", - "question": "Mul on 16-aastane laps, kes elab minuga. Kuidas ma saan tema elamisloa pikendada, et ta saaks ka edaspidi siin elada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "164", - "question": "Ma saan pidevalt kahtlaseid e-kirju, kus on manuseid. Kuidas ma saan aru, kas need on ohuklikud ja mida ma peaksin tegema, kui ma kahtlustan, et see on pahavara?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "165", - "question": "Mul on ettevõtlusloa elamisluba, mis saab pea kehtima. Kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "166", - "question": "Mul on õppimisviisamas välismaalasele, aga see on lõppemas. Kas ma saan Eestis veel 270 päeva töötada pärast selle kehtivuse lõppu?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "167", - "question": "Mul on õppimisviisaga Eestis, ja see on lõppemas. Kas ma saan jääda Eestisse pärast selle kehtivuse lõppu, ja kui jah, siis kuni millise aja jooksul?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "168", - "question": "Ma broneerin piiriületuse, aga kuidas ma saan teada, kui kaua aega ma pean ootama?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "169", - "question": "Kuidas ma saan ID-kaardi lugejat kontrollida, kas see töötab?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "170", - "question": "Kas ma saan nimekirjast kustutada mõne kohtuotsuse lingi, mida ei ole vaja?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "171", - "question": "Ma ei saa aru, kas MyID portaali info on ikka usaldusväärne, kui see on beetaversioonis? Kas ma saan sellega midagi ametlikku asja teha?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "172", - "question": "Kuidas ma saan ID-tarkvara installitud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "173", - "question": "Kas Politsei- ja Piirivalveamet teab midagi selle kohta, mis otsus tehti kriminaalasjas 1-20-4858 ja kus ma saan selle kohta rohkem infot leida?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "174", - "question": "Ma näen, et paljudel inimestel on algatatud elamisloa kehtetuks tunnistamise menetlus. Kuidas ma saan teada, miks minu elamisluba kehtetuks tunnistamine algatati?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "175", - "question": "Kuidas ma saan dokumendi digitaalselt allkirjastada Mobiil-ID abil?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "176", - "question": "Ma kaotasin oma meremehe teenistusraamatu, mida ma pean tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "177", - "question": "Ma saan aru, et kõik elektrooniliselt allkirjastatud dokumendid ei ole digiallkirjad. Kuidas ma saan kindlaks teha, kas minu allkiri on juriidiliselt kehtiv digiallkiri?" 
- }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "178", - "question": "Mul kadus meresõidutunnistus, mida ma pean tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "179", - "question": "Kas Politsei ja Piirivalveamet kasutab BRIIS-i süsteemi praegu ja kui kaua see juba kasutusel on?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "180", - "question": "Mul on 16-aastane laps, kes elab minu juures elamisloa alusel. Kuidas ma saan tema elamisluba pikendada ja millised paberid selleks vaja on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "181", - "question": "Kas ma saan endiselt avada vanemaid digiallkirjastatud dokumente, mis on loodud BDOC 1.0 formaadis, ja kui mitte, siis millega?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "182", - "question": "Kust ma saan teada, kas mu Mobiil-ID sertifikaat on veel kehtiv?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "183", - "question": "Mul kadus meresõidutunnistus, mida ma pean tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "184", - "question": "Kuidas ma saan kontrollida, millised e-teenused on hetkel avatud sessioonidega ja kuidas need sessioonid lõpetada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "185", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja kas sellega saab pangaasjadesse sisse logida?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "186", - "question": "Mul on digi-ID kaart, aga ma ei saa aru, kas sellega saadud allkiri on juriidiliselt sama kehtiv nagu omakäeline allkiri. Saaks selgitada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "187", - "question": "Kuidas ma saan veenduda, et minu ID-kaardi lugeja töötab?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "188", - "question": "Mul on ettevõtlusega seotud elamisluba, mis lõppeb pea peaagi. Milliseid dokumente ma pean nüdsel pikendustaotluse jaoks koguma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "189", - "question": "Kas ma saan teada, milliseid andmebaase BRIIS süsteem kasutab, et lennureisijate broneeringuinfot kontrolliks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "190", - "question": "Ma olen mures, et mu laps kasutab liiga lihtsaid paroole. Kas Politsei- ja Piirivalveamet pakub nõu või õpetusi lastele turvaliste paroolide loomisel?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "191", - "question": "Kas ma saan endiselt kasutada BDOC formaati digiallkirjastamiseks või on soovitatav ASICE formaadile üle minna?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "192", - "question": "Mul on küsimus: kui ma saan kahtlase e-kirja, kus on link, kas ma peaksin selle kohta politseile teatama, isegi kui ma ei kliki sellel lingil?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "193", - "question": "Kas Politsei- ja Piirivalveamet kasutab BRIIS-i süsteemi ka praegu ja kas see on kõigis EL riikides kasutusel?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "194", - "question": "Mul on äriühingu osanikuna ettevõtluseks elamisluba. Kuidas ma saan selle pikendada ja milliseid dokumente ma pean esitama?" 
- }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "195", - "question": "Ma ei saa aru, kuidas see eelisjärjekorra süsteem täpselt toimib. Kuidas saan aru, kas ma olen sellele õigustatud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "196", - "question": "Mul on probleem mobiil-ID PIN-koodidega. Kuidas ma saan neid muuta?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "197", - "question": "Kust ma saan teada, kas mu Mobiil-ID sertifikaat on veel kehtiv?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "198", - "question": "Kust ma saan tellida passi, kui elan Moskvas ja kas ma saan selle tellimiseks kasutada aukonsuli teenuseid?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "199", - "question": "Mul on SIM-kaart alles, aga PIN-koodid jäid meelest. Kuidas ma saan need PIN-koodid nüüd taastada, kui SIM-kaart pole mul alles?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "200", - "question": "Võiksid anda rohkem infot selle kohta, miks Jasvanth Jabez Jeevan David'i elamisluba tühistati?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "201", - "question": "Mul on õppimisviisum lõppemas, kas ma saan Eestis töötada, kui ma alles jään?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "202", - "question": "Mul on ID kaart, aga saan vea teate, kui proovin rakenduses dokumendiga alla kirjutada. Mis võib olla probleemiks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "203", - "question": "Ma olen mures, et mu laps kasutab liiga lihtsaid paroole. Kas Politsei- ja Piirivalveametil on soovitusi, kuidas lapsele turvalise parooli loomist õpetada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "204", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik. Kas ma saan Rahvusvahelise Migratsiooniorganisatsiooni (IOM) abist tagasi pöördumisel?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "205", - "question": "Kas Politsei- ja Piirivalveamet teab midagi kohtuotsusest, mis tehti 13. oktoobril 2020 kriminaalasjas 1-20-4858?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "206", - "question": "Mul on õppimisviisuma kehtivus lõppemas, kas ma saan selle aegu Eestis töötada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "207", - "question": "Mul on küsimus mobiil-ID kasutamise kohta. Kuidas ma saan mobiil-ID abil dokumente arvutis digitaalselt alla kirjutada, kui mul ei ole ID-tarkvara?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "208", - "question": "Kuni millal neid 2014-2020 ajutiste reisidokumentide näidiseid saab kasutada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "209", - "question": "Meil on äri, ja tahame DigiDoc4 abil dokumente alla kirjutada. Kui palju tasuta digiallkirju meile ette nähtud, ja kuidas seda piirangut ületada, kui vajame rohkem?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "210", - "question": "Ma kaotasin oma meremehe teenistusraamatu, mida ma pean tegema? Kas ma saan selle digitaalselt avalduse kaudu taotleda?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "211", - "question": "Kuidas ma saan ID-tarkvara installitud ja tööle pandud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "212", - "question": "Mul on SIM-kaart alles, aga ma ei leia PIN-koodi ümbrise alt. Kust ma saan mobiil-ID PIN-koodid, kui ma ei näe neid SIM-kaardi kaasel?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "213", - "question": "Mul on ID-kaart, aga saan vea, kui proovin rakendusega digiallkirjastada. Mida ma saan teha?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "214", - "question": "Kust ma saan teada, kuidas riigilõivu tasuma peab, kui minnakse ID-kaarti taotlema välisesinduses?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "215", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja aktiveerida? Kas ma pean selleks midagi eriti tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "216", - "question": "Mul on 16-aastane poeg, kes elab koos meiega. Kuidas ma saaksin tema elamisloa pikendada, kui see lõppeb kahe kuu jooksul?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "217", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik. Kas ma saan Rahvusvahelise Migratsiooniorganisatsiooni (IOM) kaudu abi koju tagasi pöördumiseks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "218", - "question": "Ma olen Euroopa Liidu kodanik ja tahan Eestis elama asuda. Kas ma pean kohe politsei juurde ID-kaarti taotlema, või on midagi varem vaja teha?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "219", - "question": "Kas Politsei- ja Piirivalveamet saab anda rohkem infot selle kohtuotsuse kohta, mis on 13. oktoobri 2020 Tallinna Ringkonnakohus kriminaalasjas 1-20-4858?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "220", - "question": "Mul on ettevõtlusega seotud elamisluba, kuidas ma saan selle pikendada ja millised dokumendid mulle selleks vaja lähevad?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "221", - "question": "Kust ma saan teada, kuidas riigilõivu tasuda, kui ma tahan ID-kaarti välisesinduses taotlema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "222", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja mis on sellega seotud nõudmised?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "223", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja mis on selleks vajalik?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "224", - "question": "Meie ettevõte tahab hakata kasutama DigiDoc4 rakendust dokumentide digiallkirjastamiseks. Kuidas me saame hankida ligipääsu ettevõtluslikel eesmärkidel vajalikule ajatempliteenusele?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "225", - "question": "Mul on küsimus mobiil-ID kasutamise kohta – kuidas ma saan sellega dokumente arvutis digitaalselt alla kirjutada, kui mul ei ole ID-tarkvara?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "226", - "question": "Võiks teada, kes kõik nimekirjas on ja millal nende elamisluba kehtetuks tunnistati?" 
- }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "227", - "question": "Kas ma saan nendest kohtulahenditest teada, kas politsei ametnikud olid korduvalt seotud korruptsiooni juhtumitega aastal 2019?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "228", - "question": "Mul on probleem Mobiil-IDga allkirjastamisel, DigiDoc4 annab mingit SSL ühenduse viga. Kuidas ma selle ise saan lahendada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "229", - "question": "Kas ma saan teada, mis seadusrikkumised olid nendes 2019. aasta kohtulahendites kõige sagedamini kohtusse viidud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "230", - "question": "Kas ma saan endiselt avada vanemaid digiallkirjastatud dokumente, mis on loodud BDOC 1.0 formaadis, kui ma ei saa kasutada DigiDoc4 rakendust?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "231", - "question": "Kuidas saan mobiil-ID PIN-koodi muuta?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "232", - "question": "Kas ma saan teada, kuni millal neid 2014-2020 ajutiste reisidokumentide kujundust kasutatakse?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "233", - "question": "Mul on tähtajaline elamisluba, mis lõppeb pea pea peagi. Kuidas ma saan selle pikendada ja milliseid pabereid mul vaja läheb?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "234", - "question": "Kuidas ma Mobiil-ID-d soetan ja mis on selle taotlemise protsess?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "235", - "question": "Meie ettevõte sooviks kasutada DigiDoc4 rakendust digiallkirjastamiseks. Kuidas me saame hankida ajatempliteenuse ja millised on sellega seotud nõuded ärieesmärkidel?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "236", - "question": "Mul on probleem Mobiil-IDga allkirjastamisel, DigiDoc4 viskab veateadet. Kuidas ma saan selle ise lahendada, nagu artiklis kirjutatud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "237", - "question": "Mul kadus meremehe teenistusraamat. Mida ma pean tegema, et saada uus?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "238", - "question": "Mul on Mobiil-ID SIM-kaart, aga ma ei leia PIN-koode. Kus ma need leian?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "239", - "question": "Ma olen mures, et mu laps kasutab kergelt arvatavaid paroole. Kas Politsei- ja Piirivalveamet annab nõu, kuidas lapsele turvalisemate paroolide loomist õpetada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "240", - "question": "Kas ma saan vanemaid BDOC formaate kasutada või pean üle minema ASICE formaadile?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "241", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik, kas ta saaks Rahvusvahelise Migratsiooniorganisatsiooni (IOM) toetusprogrammi kaudu abi tagasi pöördumiseks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "242", - "question": "Mul on tähtajaline elamisluba lõppemas, kuidas ma saan selle pikendada ja milliseid pabereid ma pean selleks koguma?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "243", - "question": "Ma ei ole kindel, kas mu Mobiil-ID teave on korrektsene MyID portaalis, kuidas ma saan selle kontrollida või siis parandada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "244", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik. Kas ma saan Rahvusvahelise Migratsiooniorganisatsiooni (IOM) kaudu abi tagasi pöördumiseks ja kuidas ma selle programmi jaoks kandideerin?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "245", - "question": "Kuidas ma saan kontrollida, millised e-teenused on hetkel mul sisse logitud ja kuidas need seanssid lõpetada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "246", - "question": "Kas ma saan endiselt kasutada BDOC formaati digiallkirjastamiseks või kas pean üle minema ASICEle?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "247", - "question": "Meil on e-teenus, kus tahaks mobiil-ID abil digiallkirju võimaldada. Kas saaksite anda rohkem infot, kuidas seda integreerida?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "248", - "question": "Kas MyID portaal on juba lõplik versioon või on see ikka beetaversioon? Ja kui see on beetaversioon, siis kas ma saan sellel olevat infot ametlikuks kasutuseks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "249", - "question": "Ma näen, et paljudel inimestel on algatatud elamisloa kehtetuks tunnistamise menetlus. Millised on kõige levinumad põhjused selleks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "250", - "question": "Ma näen siin palju inimesi, kelle elamisluba on tühistatud. Kuidas üldiselt otsustatakse, et elamisluba tühistada saab?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "251", - "question": "Kas ma saan BRIIS süsteemist teada, kas keegi on varem reisind ja kas temaga on seotud mingeid kriminaalseid andmeid?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "252", - "question": "Mul on ID-kaart, aga saan vea teate, kui proovin rakendusega digiallkirjastada. Mida ma saan teha?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "253", - "question": "Mul on probleem Mobiil-ID allkirjastamisega, DigiDoc4 viskas veateadet \"SSL ühenduskanali loomine ebaõnnestus\". Kuidas ma saan selle ise lahendada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "254", - "question": "Mul on küsimus Mobiil-ID taotlemise kohta. Kas ma saan selle tellida otse ID.ee lehelt või pean ma ühendust mobiilsideoperaatoriga?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "255", - "question": "Mul on küsimus Mobiil-ID kasutamise kohta. Kuidas ma PIN-koode saan sisestada, kui ma teen pangatöid mobiiltelefoniga?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "256", - "question": "Mul on ID-kaart, aga ma ei mäleta PIN-koode. Kuidas ma saan uue koodiümbriku tellida?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "257", - "question": "Kas ma saan veel kasutada BDOC formaadis digiallkirjastatud dokumente või on parem kasutada ASICE formaati?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "258", - "question": "Kuidas saan PIN-koodi vahetada mobiil-ID-s?" 
- }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "259", - "question": "Ma elan Helsingis ja soovin tellida passi. Kas ma saan selle tellida Helsingis asuva Eesti välisesinduse kaudu?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "260", - "question": "Mul kadus meremehe teenistusraamat, mida ma pean tegema? Kas ma saan selle digitaalselt avaldusega tühistada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "261", - "question": "Kas ma saan ikka vana BDOC 1.0 formaadis digiallkirjastatud dokumente avada ja kuidas?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "262", - "question": "Kas ma saan teada, kas mu ajutine reisidokument, mis mul 2018. aastal välja antud, on veel kehtiv, arvestades, et tekstis öeldakse, et sellise kujundusega dokumendid on kasutusel kuni 2022. aastani?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "263", - "question": "Kõikide nimekirjas olevate isikute puhul, mis on nende elamisloa kehtetuks tunnistamise põhjustanud alus?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "264", - "question": "Mul on ID-kaart lukus, kuidas ma saan PUK-koodi abil selle jälle avada ja mis siis edasi?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "265", - "question": "Meil on veebirakendus, kus tahaksime mobiil-ID abil digiallkirjastamise toega. Kust alustada ja milliseid ressursse me peame vaatama?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "266", - "question": "Mul on küsimus Mobiil-ID kasutamise kohta. Kuidas ma saan digitaalselt dokumente alla kirjutada arvutis, kui mul pole ID-tarkvara?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "267", - "question": "Kuidas ma saan dokumendi digitaalselt alla kirjutada Mobiil-ID abil?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "268", - "question": "Kuidas ma saan kontrollida, millised e-teenustesse sisselogimised mul aktiivsed on ja kuidas need lõpetada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "269", - "question": "Mul on tähtajalise lahkumisettekirjutuse saanud kolmanda riigi kodanik. Kas ma saan Rahvusvahelise Migratsiooniorganisatsiooni (IOM) kaudu abi tagasi pöördumiseks ja kuidas ma selleks pöörduksin?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "270", - "question": "Mul on digiallkiri, aga ma ei tea, kas see on juriidiliselt kehtiv. Kuidas ma saan seda kontrollida?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "271", - "question": "Mul on probleem DigiDoc4-ga, see näitab, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus. Mida ma pean tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "272", - "question": "Tere! Ma tahan ID-kaarti välisesinduses taotlema, aga kuidas ma riigilõivu tasuma pean? Kas ma saan selle kohta infot kusagilt?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "273", - "question": "Mul on 16-aastane poeg, kes elab koos meiega. Kuidas ma saan tema elamisloa pikendada, et ta saaks siin edasi elada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "274", - "question": "Mul on Mobiil-ID sertifikaat, aga ei tea, kuidas saan kontrollida, kas see on veel kehtiv? Kas ma saan seda teha ID.ee-s või kusagil mujal?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "275", - "question": "Mul on ID-kaart lukustatud, kuidas saan PUK-koodi abil selle avada ja tellida uued PIN-koodid?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "276", - "question": "Võiksid palun öelda, kes kõik on loetletud isikute seas, kelle elamisluba on tühistatud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "277", - "question": "Mul on tähtajaline elamisluba ja see hakkab pea lõppema. Kuidas ma saan selle pikendada ja milliseid paberitesse mul vaja läheb?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "278", - "question": "Kas MyID portaali teavet saab kasutada ametlike dokumentide jaoks või on parem pöörduda otse SK ID Solutions AS poole?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "279", - "question": "Kuidas ma saan veenduda, et minu ID-kaardi lugeja töötab?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "280", - "question": "Ma olen Euroopa Liidu kodanik ja kolisin Eestisse. Kuidas ma saan tähtajalise elamisõiguse ametlikult soetada ja kas ma pean selleks kohe ID-kaarti taotlema, kui elukoha registreerin?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "281", - "question": "Väga palun, selgitage, mis tähendab, et kellelegi on algatatud menetlus välismaalase elamisloa kehtetuks tunnistamiseks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "282", - "question": "Ma olen mures, et mu laps kasutab nõrku paroole. Kas Politsei- ja Piirivalveamet jagab soovitusi, kuidas lapsele turvalise parooli loomist õpetada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "283", - "question": "Kuidas saan PIN-koodi tagasi, kui olen selle unustanud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "284", - "question": "Mul on probleem DigiDoc4-ga, see näitab, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus. Kuidas ma saan selle probleemi lahendada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "285", - "question": "Kas need 2019. aasta kohtulahendid, mis puudutavad korruptsiooni ja Politsei- ja Piirivalveameti tegevust, on avalikud ja kas ma saan neid kusagilt alla laadida?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "286", - "question": "Mul on ID-kaart, aga saan vea teate, kui proovin rakendusega digiallkirjastada. Mis võib olla viga?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "287", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja mis selleks vaja?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "288", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja aktiveerida? Mis on selleks etapid?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "289", - "question": "Kust ma saan teada, kuidas riigilõivu ID-kaardi taotlemisel välisesinduses tasuma pean?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "290", - "question": "Kas MyID portaalis olev info Mobiil-ID kohta on usaldusväärne, eriti kui ma seda plaanin kasutada mingi olulise asja jaoks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "291", - "question": "Ma olen mures, et mu laps kasutab nõrku paroole. 
Kas Politsei- ja Piirivalveamet pakub nõuandeid või ressursse lastele turvaliste paroolide loomisel?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "292", - "question": "Meil on ettevõte ja tahame kasutada DigiDoc4 rakendust dokumentide digiallkirjastamiseks. Kuidas me saame hankida ajatempliteenuse ja mobiil-ID teenuse ligipääsu ärieesmärkidel?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "293", - "question": "Kas saaks teada, miks Jasvanth Jabez Jeevan David'i elamisluba tühistati?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "294", - "question": "Mul on ettevõtlusega seotud elamisluba suurinvestorile. Kust ma tean, millal ma peaksin pikendamisega hakkama?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "295", - "question": "Ma saan aru, et mobiil-ID PIN2-koodi küsitakse digiallkirjastamisel, aga kuidas ma saan teada, kas e-teenus onlegi turvaline, enne kui ma selle koodi sisestan?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "296", - "question": "Mul on küsimus Mobiil-ID kasutamise kohta. Kuidas ma saan dokumendile digiallkirja andma telefoniga?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "297", - "question": "Ma tahan digitaalselt dokumente allkirjastada, aga ei tea, kas minu Smart-ID on selleks piisav? Mis on selle kehtivus?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "298", - "question": "Mul on tähtajaline elamisluba lõppemas, kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "299", - "question": "Tere! Ma tahan ID-kaarti välisesinduses taotlema minna. Kust ma saan teada, kuidas seal riigilõivu maksta?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "300", - "question": "Kas ma saan teada, kuidas BRIIS süsteem täpselt tööle peaks, ja kas see mõjutab minu lennureisi broneeringuid?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "301", - "question": "Mul on vaja mobiil-ID abil dokumente alla kirjutada, aga ma ei saa aru, kuidas DigiDoc4 rakendust kasutada. Saaksid anda selgemad juhised selle kohta?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "302", - "question": "Kas ma saan endiselt avada vana aja BDOC 1.0 formaadis digiallkirjastatud dokumente, ja kui jah, siis kuidas?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "303", - "question": "Võiksid palun ütelda, kes kõik on nimekirjas loetletud isikute seas, kelle elamisluba on tühistanud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "304", - "question": "Kuidas saan PIN-koodi muuta mobiil-ID-s?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "305", - "question": "Mul on tähtajaline elamisluba lõppemas, kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks olema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "306", - "question": "Ma pean digidokumenti tegema, aga ei tea, kas ma peaksin kasutama BDOC, CDOC või ASICE formaati. Mis vahe neil on ja millist peaks ma valima?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "307", - "question": "Kas ma saan nendest 2019. 
aasta kohtulahenditest, mis Politsei- ja Piirivalveameti tegevusega seotud on, teada, millised seaduse paragrahvid kõige sagedamini rikkumistena esinesid?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "308", - "question": "Ma olen mures, et mu laps kasutab kergelt arvatavaid paroole. Kuidas Politsei- ja Piirivalveamet vanematele selles osas nõu anda saaks?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "309", - "question": "Kuidas ma saan ID-tarkvara installitud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "310", - "question": "Kuidas ma saan teada, kas mu Mobiil-ID sertifikaat on veel kehtiv ja kust ma leian selle info?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "311", - "question": "Kas ma saan endiselt avada vanemaid digiallkirjastatud dokumente, mis on loodud enne BDOC 2.1, ja kui jah, siis millise tarkvaraga?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "312", - "question": "Ma olen Euroopa Liidu kodanik ja kolisin Eestisse. Kuidas ma saan end ametlikult registreerida ja saada tähtajaline elamisõigus, et saaksin siin kuni 5 aastat elada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "313", - "question": "Mul kadus meremehe teenistusraamat, mida ma pean tegema? Kas ma saan selle digitaalselt avaldusega uue taotleda?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "314", - "question": "Kas need 2019. aasta kohtulahendid korruptsioonijuhtumites puudutavad politsei ja piirivalve ametnike tegevust? Või on need üldisemalt seotud korruptsiooniga?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "315", - "question": "Mul on probleem DigiDoc4 rakendusega, kuidas saan seda lahendada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "316", - "question": "Mul on DigiDoc4-s viga: see ütleb, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus. Mida ma pean tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "317", - "question": "Mul on tähtajaline elamisluba lõppemas, kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks olema esitama?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "318", - "question": "Mul tuli DigiDoc4 käivitamisel viga, kus öeldakse, et sertifikaatide usaldusnimekirja uuendamine ei õnnestunud. Mida ma pean tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "319", - "question": "Kas ma saan nendest 2019. aasta korruptsioonijuhtumitest kohtulahendite täpset koopiad saada, mis on Politsei- ja Piirivalveameti tegevusega seotud?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "320", - "question": "Kas ma saan kusagilt teada, kas mu lennureisi broneering on sisestatud BRIIS süsteemi?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "321", - "question": "Mul on tähtajaline elamisluba lõppemas, kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "322", - "question": "Ma olen Euroopa Liidu kodanik ja tahan Eestis elama asuda. Kas ma pean kohe ID-kaarti taotlema, või on mõni teine samm tähtsam?" 
- }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "323", - "question": "Mul kadus meremehe teenistusraamat. Mida ma pean tegema, et saada uus?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "324", - "question": "Kuidas ma saan kontrollida, millised riigi e-teenustesse sisselogimised mul hetkel aktiivsed on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "325", - "question": "Kas ma saan endiselt avada vanemaid digiallkirjastatud dokumente, mis on loodud BDOC 1.0 formaadis, ja kui ei, siis millega?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "326", - "question": "Mul on SIM-kaart alles, aga ma unustasin PIN1-koodi. Kuidas ma saan selle tagasi või saan selle muuta?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "327", - "question": "Kuni millal neid ajutisi reisidokumente, mis olid antud 2014-2020, saab veel kasutada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "328", - "question": "Kas saaks teada, miks Jasvanth Jabez Jeevan David'i elamisluba tühistati?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "329", - "question": "Mul on probleem: DigiDoc4 annab teate, et sertifikaatide usaldusnimekirja uuendamine ebaõnnestus. Mida ma teen?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "330", - "question": "Kas Politsei ja Piirivalveamet kasutab BRIIS-i süsteemi kontrollimiseks, kui ma lennule piletin?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "331", - "question": "Mul on DigiDoc4-s viga, kus sertifikaatide usaldusnimekirja uuendamine ei õnnestu. Mida ma peaksin tegema?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "332", - "question": "Mul on õppimisviisaga Eestis, aga see on lõppemas. Kas ma saan jääda Eestisse pärast selle kehtivuse lõppu ja kui kauaks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "333", - "question": "Kas Politsei- ja Piirivalveamet teab midagi selle kohta, et Tallinna Ringkonnakohus mõistis 13. oktoobril 2020 kohtuotsuse kriminaalasjas 1-20-4858?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "334", - "question": "Mul on küsimus, kuidas ma saan end piiriületusjärjekorda registreerida?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "335", - "question": "Kas ma saan BRIIS süsteemist leitud andmetest teada, kuidas see minu lennureisi broneeringu andmeid kasutab?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "336", - "question": "Kas Politsei- ja Piirivalveameti suhtes on 2019. aastal olnud palju korruptsiooniga seotud kohtulahenditega seotud juhtumeid ja kas ma saan nendest kohtulahenditest rohkem teada?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "337", - "question": "Mul on Mobiil-ID sertifikaat, aga ei tea, kuidas selle kehtivust kontrollida. Kas saan seda teha ID.ee lehel või kusagil mujal?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "338", - "question": "Mul on suurinvestori elamisluba. Kuidas ma saan seda pikendada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "339", - "question": "Kuidas ma Mobiil-ID-d saan tellida ja mis selleks vaja?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "340", - "question": "Kas ma saan vanemate digiallkirjastatud dokumentide avamiseks kasutada DigiDoc4 rakendust, kui need on loodud enne BDOC 2.1?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "341", - "question": "Meie ettevõte tahab kasutada DigiDoc4 rakendust digiallkirjastamiseks. Kas me peame sõlmima eraldi lepingu SK ID Solutions AS-iga, et saada piisavalt digiallkirju kuus?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "342", - "question": "Ma proovin mobiil-IDga panka sisse logida, aga saan mingit veateadet. Kuidas ma saan seda probleemi lahendada?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "343", - "question": "Mul on ettevõtlusega seotud elamisluba. Kuidas ma saan selle pikendamise taotluse esitada ja milliseid dokumente ma pean selleks koguma?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "344", - "question": "Ma elan Berliinis ja tahan tellida passi. Kas ma saan selle tellida Saksamaa saatkonna kaudu?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "345", - "question": "Ma saan aru, et piiriületus võib olla aeglane. Kui ma peaksin ootama üldises järjekorras, siis kuidas saan teada, palju sõidukeid on ees?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "346", - "question": "Mul on ID-kaart lukus, kuidas saan uued PIN- ja PUK-koodid tellida?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "347", - "question": "Meil on ettevõte ja tahame oma teenusesse mobiil-ID digiallkirjastamise võimaluse lisada. Kust alustada ja kas ID.ee pakub selleks mingit riigipoolset tarkvara või dokumentatsiooni?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "348", - "question": "Kuidas ma saan ID-tarkvara installitud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "349", - "question": "Mul on probleem Mobiil-IDga dokumentide allkirjastamisel, DigiDoc4 näitab mingit SSL ühenduse veateadet. Kuidas ma selle saan lahendatud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "350", - "question": "Ma unustasin mobiil-ID PIN1 koodi, mida ma teen?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "351", - "question": "Mul on Mobiil-ID sertifikaat, aga ma ei tea, kas see on veel kehtiv. Kust ma saan selle kehtivusaega kontrollida?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "352", - "question": "Kas ma saan teada, kas mul on veel kehtiv ajutine reisidokument, mis oli välja antud enne 18. detsembril 2020?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "353", - "question": "Mul on probleem Mobiil-IDga dokumente allkirjastamisel, kuvatakse mingit veateadet. Kust ma leian infot selle lahendamiseks?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "354", - "question": "Mul on 16-aastane poeg, kes elab minu juures elamisloa alusel. Kust ma saan teada, kuidas tema elamisluba pikendada ja millised paberid selleks vaja on?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "355", - "question": "Meil on e-teenus, kus tahaksime mobiil-ID abil digiallkirju võimaldada. Kuidas me selleks kõige paremini hakkama saame ja mis ressursse meil vaja läheb?" 
- }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "356", - "question": "Kas ma saan endiselt avada vanemaid digiallkirjastatud dokumente, mis on loodud enne BDOC 2.1 kasutuselevõttu, ja kui jah, siis millega?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "357", - "question": "Mul on ettevõtlusega seotud elamisluba ja see on lõppemas. Kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks esitama?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "358", - "question": "Kas Politsei- ja Piirivalveamet teab midagi selle kohtuotsuse kohta, mis on 13. oktoobri 2020 ja puudutab karistusseadustiku § 300¹ lg 2?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "359", - "question": "Mul on elamisluba suurinvestorina. Kuidas ma saan selle pikendada ja milliseid dokumente ma pean selleks esitama?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "360", - "question": "Mul on mure, et saan palju kahtlaseid e-kirju. Kuidas ma saan aru, kas kiri on päris või pahatahtlik ja mida ma peaks tegema, kui ma kahtlustan, et kiri on võlts?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "361", - "question": "Kas Politsei- ja Piirivalveamet teab midagi selle kohta, et Tallinna Ringkonnakohus mõistis 13. oktoobril 2020 süüdimõistmise kriminaalasjas 1-20-4858?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "362", - "question": "Kuidas ma saan teada, kas mul on keeld Eestisse sisenemiseks, kui mul on varem saanud lahkumisettekirjutus?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "363", - "question": "Kui mu õppimisviisaks välismaalase elamisluba lõppeb, kas ma saan siis Eestis töötada ja kui kaua ma saan riigis viibida?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "364", - "question": "Ma kaotasin oma meresõidutunnistuse, mida ma pean tegema?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "365", - "question": "Mul on ID-kaart, aga saan aru, kuidas PIN2-koodi sisestada rakenduses?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "366", - "question": "Mul on Mobiil-ID SIM kaart, aga ma ei leia PIN-koode. Kus ma need leian?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "367", - "question": "Kelle tähtajaline elamisluba tühistati 29. novembril?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "368", - "question": "Kuidas ma saan ID-tarkvara installitud?" - }, - { - "agency_name": "id.ee", - "agency_id": "1234", - "id": "369", - "question": "Mul on Mobiil-ID SIM-kaart, aga ma ei leia PIN-koode. Kus ma need leian?" - }, - { - "agency_name": "Politsei- ja Piirivalveamet", - "agency_id": "234", - "id": "370", - "question": "Mul kadus meresõidutunnistus, mida ma peaksin tegema?" 
- } - ], - "total_items": 370, - "metadata": { - "aggregation_timestamp": "2025-07-01 09:13:56", - "merge_strategy": "combine_arrays", - "source_count": 37 - } -} \ No newline at end of file diff --git a/src/training/requirements.txt b/src/training/requirements.txt index dffebe44..84dbe8be 100644 --- a/src/training/requirements.txt +++ b/src/training/requirements.txt @@ -34,3 +34,8 @@ py-cpuinfo>=8.0.0 # For hyperparameter optimization (optional) optuna>=2.10.0 + +loguru==0.7.3 + +onnx==1.18.0 +onnxruntime==1.22.0 diff --git a/src/training/scripts/constants.py b/src/training/scripts/constants.py index fd1d8689..be9a9a61 100644 --- a/src/training/scripts/constants.py +++ b/src/training/scripts/constants.py @@ -13,3 +13,25 @@ "max_length": 128, }, } +LOG_DIRECTORY = "/app/src/training/logs" +DATASETS_ARTIFACTS_DIR = "/app/src/training/dataset_artifacts" +MODELS_DIR = "/app/models" +TRAINING_DATASET_FOLDER_NAME = "training_datasets" +PROCESSED_DATASET_FOLDER_NAME = "processed_datasets" +LOG_FORMAT = "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}" +LOG_FILE_NAME = "train.log" +ROTATION_SIZE = "100 MB" +RETENTION_PERIOD = "10 days" +LOG_FILE_HANDLER_FORMAT = ( "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}" ) +SEED = 42 # For reproducibility +REQUIRED_TRAINING_FILES = ["train.json", "val.json", "test.json", "label_mappings.json"] +TEST_SIZE = 0.2 # Proportion of the dataset to include in the test split +VALIDATION_SIZE = 0.1 # Proportion of the dataset to include in the validation split +RANDOM_STATE = 42 # Random state for reproducibility in train-test split +PROCESSED_DATASET_DIR = "/app/src/training/dataset_artifacts/processed_datasets" +S3_FERRY_BASE_URL = "http://gc-s3-ferry:3000" +TRAINING_JOB_STATUS_UPDATE_URL = ( "http://resql:8082/global-classifier/update-training-job-status" ) diff --git a/src/training/scripts/create_datasets.py b/src/training/scripts/create_datasets.py index 419b8825..4a4d19c2 100644 --- a/src/training/scripts/create_datasets.py +++ b/src/training/scripts/create_datasets.py @@ -2,352 +2,438 @@ from sklearn.model_selection import train_test_split from pathlib import Path from typing import Dict, List, Tuple, Iterator -import logging import gc import argparse +import sys +import os + +from loguru import logger +from scripts.constants import ( LOG_DIRECTORY, LOG_FORMAT, LOG_FILE_NAME, ROTATION_SIZE, RETENTION_PERIOD, LOG_FILE_HANDLER_FORMAT, ) + +os.makedirs(LOG_DIRECTORY, exist_ok=True) + +# Remove default handler and add custom ones +logger.remove() + +# Add console handler for immediate feedback +logger.add( sys.stderr, level="DEBUG", format=LOG_FORMAT, colorize=True, ) + +# Add file handler +logger.add( sink=os.path.join(LOG_DIRECTORY, LOG_FILE_NAME), level="DEBUG", rotation=ROTATION_SIZE, retention=RETENTION_PERIOD, backtrace=True, diagnose=True, format=LOG_FILE_HANDLER_FORMAT, ) -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) class ScalableDatasetProcessor: """Scalable processor for large aggregated datasets.""" - - def __init__(self, dataset_path: str, output_dir: str = "data/processed", - chunk_size: int = 10000): + + def __init__(self, dataset_path: str, output_dir: str, chunk_size: int = 10000): self.dataset_path = dataset_path self.dataset_id = Path(dataset_path).stem - + # Create dataset-specific output directory self.base_output_dir = Path(output_dir) self.output_dir = self.base_output_dir / f"dataset_{self.dataset_id}"
self.output_dir.mkdir(parents=True, exist_ok=True) - + self.chunk_size = chunk_size - + logger.info(f"Dataset ID: {self.dataset_id}") logger.info(f"Output directory: {self.output_dir}") - + def estimate_dataset_size(self) -> int: """Estimate dataset size without loading full data.""" - with open(self.dataset_path, 'r', encoding='utf-8') as f: + with open(self.dataset_path, "r", encoding="utf-8") as f: # Try to read just metadata first try: for line_num, line in enumerate(f): if '"total_items"' in line: # Extract total_items value import re + match = re.search(r'"total_items":\s*(\d+)', line) if match: return int(match.group(1)) if line_num > 10: # Don't read too far break - except: + except Exception: pass - + # Fallback: count items by streaming return self._count_items_streaming() - + def _count_items_streaming(self) -> int: """Estimate the item count by reading the file and counting question fields.""" - with open(self.dataset_path, 'r', encoding='utf-8') as f: + with open(self.dataset_path, "r", encoding="utf-8") as f: content = f.read() # Quick estimate based on question count return content.count('"question"') - - def load_data_chunked(self, chunk_size: int = None) -> Iterator[Tuple[List[str], List[str]]]: + + def load_data_chunked( self, chunk_size: int = None ) -> Iterator[Tuple[List[str], List[str]]]: """Load data in chunks for memory efficiency.""" if chunk_size is None: chunk_size = self.chunk_size - + logger.info(f"Loading data in chunks of {chunk_size}") - - with open(self.dataset_path, 'r', encoding='utf-8') as f: + + with open(self.dataset_path, "r", encoding="utf-8") as f: data = json.load(f) - - aggregated_data = data.get('aggregated_data', []) - + + aggregated_data = data.get("aggregated_data", []) + # Process in chunks for i in range(0, len(aggregated_data), chunk_size): - chunk = aggregated_data[i:i + chunk_size] - + chunk = aggregated_data[i : i + chunk_size] + texts = [] labels = [] - + for item in chunk: - question = item.get('question', '').strip() - agency_name = item.get('agency_name', '').strip() - + question = item.get("question", "").strip() + agency_name = item.get("agency_name", "").strip() + if question and agency_name: texts.append(question) labels.append(agency_name) - - logger.info(f"Processed chunk {i//chunk_size + 1}, {len(texts)} samples") + + logger.info(f"Processed chunk {i // chunk_size + 1}, {len(texts)} samples") yield texts, labels - + def load_data_memory_optimized(self) -> Tuple[List[str], List[str], Dict]: """Load data with memory optimization for large datasets.""" estimated_size = self.estimate_dataset_size() logger.info(f"Estimated dataset size: {estimated_size} items") - + # Adjust chunk size based on dataset size (check the larger threshold first; otherwise the 1M+ branch is unreachable) - if estimated_size > 100000: # 100K+ items - self.chunk_size = 5000 - logger.info("Large dataset detected, using smaller chunks") - elif estimated_size > 1000000: # 1M+ items + if estimated_size > 1000000: # 1M+ items + self.chunk_size = 1000 + logger.info("Very large dataset detected, using minimal chunks") + elif estimated_size > 100000: # 100K+ items + self.chunk_size = 5000 + logger.info("Large dataset detected, using smaller chunks") - + # Pre-allocate lists for better performance texts = [] labels = [] - + # Process in chunks and merge for chunk_texts, chunk_labels in self.load_data_chunked(): texts.extend(chunk_texts) labels.extend(chunk_labels) - + # Periodic garbage collection for very large datasets if len(texts) % 50000 == 0: gc.collect() logger.info(f"Processed {len(texts)} samples so far...") - + # Load metadata separately to save memory - with open(self.dataset_path, 'r', encoding='utf-8') as f: + with open(self.dataset_path, "r", encoding="utf-8") as f: data = json.load(f) -
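# The chunk-size selection above is easy to get wrong, so here is a standalone
# sketch of the corrected logic with a quick self-check (the helper name is
# invented for illustration; the thresholds and chunk sizes mirror the script):
def pick_chunk_size(estimated_size: int, default: int = 10000) -> int:
    # Test the larger threshold first, otherwise the 1M+ branch never runs.
    if estimated_size > 1000000:  # 1M+ items -> minimal chunks
        return 1000
    if estimated_size > 100000:  # 100K+ items -> smaller chunks
        return 5000
    return default

assert pick_chunk_size(50000) == 10000
assert pick_chunk_size(200000) == 5000
assert pick_chunk_size(2000000) == 1000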
metadata = data.get('metadata', {}) - + metadata = data.get("metadata", {}) + logger.info(f"Total loaded: {len(texts)} samples") logger.info(f"Found {len(set(labels))} unique agencies") - + return texts, labels, metadata - - def create_label_mapping_optimized(self, labels: List[str]) -> Tuple[Dict[str, int], Dict[int, str]]: + + def create_label_mapping_optimized( + self, labels: List[str] + ) -> Tuple[Dict[str, int], Dict[int, str]]: """Memory-optimized label mapping creation.""" # Use set for O(1) uniqueness check, then sort unique_labels = sorted(set(labels)) - + label_to_id = {label: idx for idx, label in enumerate(unique_labels)} id_to_label = {idx: label for label, idx in label_to_id.items()} - + logger.info(f"Created label mapping for {len(unique_labels)} classes") - + # Clear intermediate variables del unique_labels gc.collect() - + return label_to_id, id_to_label - - def split_dataset_large(self, texts: List[str], labels: List[str], - test_size: float = 0.2, val_size: float = 0.1, - random_state: int = 42) -> Dict[str, List]: + + def split_dataset_large( + self, + texts: List[str], + labels: List[str], + test_size: float = 0.2, + val_size: float = 0.1, + random_state: int = 42, + ) -> Dict[str, List]: """Memory-efficient dataset splitting for large datasets.""" logger.info(f"Splitting large dataset: {len(texts)} samples") - + # For very large datasets, use indices instead of copying data if len(texts) > 100000: - return self._split_by_indices(texts, labels, test_size, val_size, random_state) + return self._split_by_indices( + texts, labels, test_size, val_size, random_state + ) else: return self._split_direct(texts, labels, test_size, val_size, random_state) - - def _split_by_indices(self, texts: List[str], labels: List[str], - test_size: float, val_size: float, random_state: int) -> Dict[str, List]: + + def _split_by_indices( + self, + texts: List[str], + labels: List[str], + test_size: float, + val_size: float, + random_state: int, + ) -> Dict[str, List]: """Split using indices to save memory.""" indices = list(range(len(texts))) - + # Split indices instead of data temp_indices, test_indices = train_test_split( - indices, test_size=test_size, random_state=random_state, - stratify=[labels[i] for i in indices] + indices, + test_size=test_size, + random_state=random_state, + stratify=[labels[i] for i in indices], ) - + val_ratio = val_size / (1 - test_size) train_indices, val_indices = train_test_split( - temp_indices, test_size=val_ratio, random_state=random_state, - stratify=[labels[i] for i in temp_indices] + temp_indices, + test_size=val_ratio, + random_state=random_state, + stratify=[labels[i] for i in temp_indices], ) - + # Create splits using indices splits = { - 'train': { - 'texts': [texts[i] for i in train_indices], - 'labels': [labels[i] for i in train_indices] + "train": { + "texts": [texts[i] for i in train_indices], + "labels": [labels[i] for i in train_indices], }, - 'val': { - 'texts': [texts[i] for i in val_indices], - 'labels': [labels[i] for i in val_indices] + "val": { + "texts": [texts[i] for i in val_indices], + "labels": [labels[i] for i in val_indices], + }, + "test": { + "texts": [texts[i] for i in test_indices], + "labels": [labels[i] for i in test_indices], }, - 'test': { - 'texts': [texts[i] for i in test_indices], - 'labels': [labels[i] for i in test_indices] - } } - - logger.info(f"Split sizes: Train={len(train_indices)}, Val={len(val_indices)}, Test={len(test_indices)}") + + logger.info( + f"Split sizes: Train={len(train_indices)}, 
Val={len(val_indices)}, Test={len(test_indices)}" ) return splits - - def _split_direct( self, texts: List[str], labels: List[str], test_size: float, val_size: float, random_state: int, ) -> Dict[str, List]: """Direct splitting for smaller datasets.""" X_temp, X_test, y_temp, y_test = train_test_split( - texts, labels, test_size=test_size, random_state=random_state, stratify=labels + texts, + labels, + test_size=test_size, + random_state=random_state, + stratify=labels, ) - + val_ratio = val_size / (1 - test_size) # e.g. 0.1 / (1 - 0.2) = 0.125 of the remaining data X_train, X_val, y_train, y_val = train_test_split( - X_temp, y_temp, test_size=val_ratio, random_state=random_state, stratify=y_temp + X_temp, + y_temp, + test_size=val_ratio, + random_state=random_state, + stratify=y_temp, ) - + return { - 'train': {'texts': X_train, 'labels': y_train}, - 'val': {'texts': X_val, 'labels': y_val}, - 'test': {'texts': X_test, 'labels': y_test} + "train": {"texts": X_train, "labels": y_train}, + "val": {"texts": X_val, "labels": y_val}, + "test": {"texts": X_test, "labels": y_test}, } - - def save_splits_chunked( self, splits: Dict, label_to_id: Dict[str, int], id_to_label: Dict[int, str], metadata: Dict, ): """Save all splits, routing each to a size-appropriate writer.""" - for split_name, split_data in splits.items(): # Save files directly in dataset-specific folder split_file = self.output_dir / f"{split_name}.json" - + # Route large splits to the dedicated large-split writer - if len(split_data['texts']) > 50000: + if len(split_data["texts"]) > 50000: self._save_large_split(split_file, split_data, label_to_id) else: self._save_small_split(split_file, split_data, label_to_id) - + logger.info(f"Saved {split_name} split to {split_file}") - + # Save mappings and stats self._save_metadata(label_to_id, id_to_label, metadata, splits) - - def _save_large_split( self, file_path: Path, split_data: Dict, label_to_id: Dict[str, int] ): """Save a large split with a single json.dump call (the whole split is serialized at once, not streamed).""" - texts = split_data['texts'] - labels = split_data['labels'] + texts = split_data["texts"] + labels = split_data["labels"] label_ids = [label_to_id[label] for label in labels] - + output = { - 'texts': texts, - 'labels': labels, - 'label_ids': label_ids, - 'num_samples': len(texts) + "texts": texts, + "labels": labels, + "label_ids": label_ids, + "num_samples": len(texts), } - - with open(file_path, 'w', encoding='utf-8') as f: + + with open(file_path, "w", encoding="utf-8") as f: json.dump(output, f, ensure_ascii=False, indent=2) - - def _save_small_split( self, file_path: Path, split_data: Dict, label_to_id: Dict[str, int] ): """Save small split normally.""" - label_ids = [label_to_id[label] for label in split_data['labels']] + label_ids = [label_to_id[label] for label in split_data["labels"]] - + output = { - 'texts': split_data['texts'], - 'labels': split_data['labels'], - 'label_ids': label_ids, - 'num_samples': len(split_data['texts']) + "texts": split_data["texts"], + "labels": split_data["labels"], + "label_ids": label_ids, + "num_samples": len(split_data["texts"]), } - - with open(file_path, 'w', encoding='utf-8') as
f: + + with open(file_path, "w", encoding="utf-8") as f: json.dump(output, f, ensure_ascii=False, indent=2) - - def _save_metadata(self, label_to_id: Dict, id_to_label: Dict, metadata: Dict, splits: Dict): + + def _save_metadata( + self, label_to_id: Dict, id_to_label: Dict, metadata: Dict, splits: Dict + ): """Save metadata files.""" # Label mappings mappings_file = self.output_dir / "label_mappings.json" mappings = { - 'label_to_id': label_to_id, - 'id_to_label': id_to_label, - 'num_classes': len(label_to_id), - 'dataset_id': self.dataset_id, - 'original_metadata': metadata + "label_to_id": label_to_id, + "id_to_label": id_to_label, + "num_classes": len(label_to_id), + "dataset_id": self.dataset_id, + "original_metadata": metadata, } - - with open(mappings_file, 'w', encoding='utf-8') as f: + + with open(mappings_file, "w", encoding="utf-8") as f: json.dump(mappings, f, ensure_ascii=False, indent=2) - + # Statistics stats_file = self.output_dir / "stats.json" - total_samples = sum(len(split['texts']) for split in splits.values()) - + total_samples = sum(len(split["texts"]) for split in splits.values()) + stats = { - 'dataset_id': self.dataset_id, - 'total_samples': total_samples, - 'num_classes': len(label_to_id), - 'classes': list(label_to_id.keys()), - 'splits': { + "dataset_id": self.dataset_id, + "total_samples": total_samples, + "num_classes": len(label_to_id), + "classes": list(label_to_id.keys()), + "splits": { split_name: { - 'num_samples': len(split_data['texts']), - 'percentage': len(split_data['texts']) / total_samples * 100 + "num_samples": len(split_data["texts"]), + "percentage": len(split_data["texts"]) / total_samples * 100, } for split_name, split_data in splits.items() - } + }, } - - with open(stats_file, 'w', encoding='utf-8') as f: + + with open(stats_file, "w", encoding="utf-8") as f: json.dump(stats, f, ensure_ascii=False, indent=2) - + logger.info(f"Saved label mappings to {mappings_file}") logger.info(f"Saved dataset statistics to {stats_file}") - - def process_dataset(self, test_size: float = 0.2, val_size: float = 0.1, - random_state: int = 42): + + def process_dataset( + self, test_size: float = 0.2, val_size: float = 0.1, random_state: int = 42 + ): """Main processing pipeline optimized for large datasets.""" - logger.info(f"Starting scalable dataset processing for dataset ID: {self.dataset_id}") - + logger.info( + f"Starting scalable dataset processing for dataset ID: {self.dataset_id}" + ) + # Load data with memory optimization texts, labels, metadata = self.load_data_memory_optimized() - + # Create label mappings efficiently label_to_id, id_to_label = self.create_label_mapping_optimized(labels) - + # Split dataset efficiently - splits = self.split_dataset_large(texts, labels, test_size, val_size, random_state) - + splits = self.split_dataset_large( + texts, labels, test_size, val_size, random_state + ) + # Save with chunking for large datasets self.save_splits_chunked(splits, label_to_id, id_to_label, metadata) - + # Cleanup del texts, labels gc.collect() - + logger.info("Scalable dataset processing completed successfully!") - + return { - 'dataset_id': self.dataset_id, - 'output_dir': str(self.output_dir), - 'num_classes': len(label_to_id), - 'splits': {k: len(v['texts']) for k, v in splits.items()} + "dataset_id": self.dataset_id, + "output_dir": str(self.output_dir), + "num_classes": len(label_to_id), + "splits": {k: len(v["texts"]) for k, v in splits.items()}, } + def main(): - parser = argparse.ArgumentParser(description='Process large aggregated 
datasets for training') - parser.add_argument('--dataset_path', type=str, required=True) - parser.add_argument('--output_dir', type=str, default='data/processed') - parser.add_argument('--chunk_size', type=int, default=10000) - parser.add_argument('--test_size', type=float, default=0.2) - parser.add_argument('--val_size', type=float, default=0.1) - parser.add_argument('--random_state', type=int, default=42) - + parser = argparse.ArgumentParser( + description="Process large aggregated datasets for training" + ) + parser.add_argument("--dataset_path", type=str, required=True) + parser.add_argument("--output_dir", type=str, default="data/processed") + parser.add_argument("--chunk_size", type=int, default=10000) + parser.add_argument("--test_size", type=float, default=0.2) + parser.add_argument("--val_size", type=float, default=0.1) + parser.add_argument("--random_state", type=int, default=42) + args = parser.parse_args() - + processor = ScalableDatasetProcessor( - args.dataset_path, - args.output_dir, - args.chunk_size + args.dataset_path, args.output_dir, args.chunk_size ) - + result = processor.process_dataset( - test_size=args.test_size, - val_size=args.val_size, - random_state=args.random_state + test_size=args.test_size, val_size=args.val_size, random_state=args.random_state ) - - print(f"\nDataset processing completed!") - print(f"Dataset ID: {result['dataset_id']}") - print(f"Number of classes: {result['num_classes']}") - print(f"Output directory: {result['output_dir']}") + + logger.info("\nDataset processing completed!") + logger.info(f"Dataset ID: {result['dataset_id']}") + logger.info(f"Number of classes: {result['num_classes']}") + logger.info(f"Output directory: {result['output_dir']}") + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/training/scripts/create_datasets_v1.py b/src/training/scripts/create_datasets_v1.py deleted file mode 100644 index 34146de0..00000000 --- a/src/training/scripts/create_datasets_v1.py +++ /dev/null @@ -1,333 +0,0 @@ -import os -import sys -import pandas as pd -import numpy as np -from sklearn.model_selection import train_test_split -import json -import argparse -from loguru import logger -import matplotlib.pyplot as plt -import seaborn as sns - -logger.remove() -# add stout handler -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") - - -def load_raw_data(input_file): - """ - Load and process the raw conversation data. - - Args: - input_file: Path to the raw data file - - Returns: - DataFrame with processed conversation data - """ - logger.info(f"Loading data from {input_file}") - - if input_file.endswith(".csv"): - df = pd.read_csv(input_file) - else: - raise ValueError(f"Unsupported file format: {input_file}") - - # Check required columns - required_cols = ["turn", "speaker", "text", "agency"] - for col in required_cols: - if col not in df.columns: - raise ValueError(f"Required column '{col}' not found in data") - - return df - - -def process_conversations(df): - """ - Process the raw data into conversation-level samples for classification. - Each conversation (all turns together) becomes one training instance. 
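# A toy sketch of the flattening this docstring describes (column names follow
# the script; the two rows are invented): all turns of one conversation are
# joined into a single "speaker: text" block that becomes one training instance.
import pandas as pd

toy = pd.DataFrame(
    {
        "conversation_id": [7, 7],
        "turn": [1, 2],
        "speaker": ["user", "agent"],
        "text": ["How do I renew my residence permit?", "You can apply online."],
        "agency": ["Politsei- ja Piirivalveamet"] * 2,
    }
)
flattened = "\n".join(f"{row.speaker}: {row.text}" for row in toy.itertuples())
# flattened == "user: How do I renew my residence permit?\nagent: You can apply online."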
- - Args: - df: DataFrame with raw conversation data - - Returns: - DataFrame with one row per conversation - """ - logger.info("Processing conversations...") - - # Group by conversation_id - conversations = [] - for conv_id, group in df.groupby("conversation_id"): - agency = group["agency"].iloc[0] - - # Format the conversation as a string with speaker labels - conversation_text = [] - for _, row in group.iterrows(): - conversation_text.append(f"{row['speaker']}: {row['text']}") - - full_text = "\n".join(conversation_text) - - conversations.append( - { - "conversation_id": conv_id, - "text": full_text, - "agency": agency, - "num_turns": len(group), - } - ) - - conversation_df = pd.DataFrame(conversations) - - # Print statistics - logger.info( - f"Created {len(conversation_df)} conversation instances from {len(df)} turns" - ) - logger.info(f"Conversations per agency:") - agency_counts = conversation_df["agency"].value_counts() - for agency, count in agency_counts.items(): - logger.info(f" {agency}: {count} conversations") - - return conversation_df - - -def split_dataset(df, test_size=0.2, val_size=0.1, random_state=42): - """ - Split the dataset into training, validation, and test sets. - - Args: - df: DataFrame with processed conversation data - test_size: Proportion for the test set - val_size: Proportion for the validation set - random_state: Random seed for reproducibility - - Returns: - train_df, val_df, test_df: Split DataFrames - """ - # Check if we can use stratification (at least 2 samples per class) - class_counts = df["agency"].value_counts() - use_stratify = all(count >= 2 for count in class_counts) - - if not use_stratify: - logger.warning( - "Warning: Some classes have fewer than 2 samples. Stratification disabled." - ) - stratify = None - else: - stratify = df["agency"] - - # First split: train+val vs test - train_val_df, test_df = train_test_split( - df, test_size=test_size, random_state=random_state, stratify=stratify - ) - - # For the second split, check again if we can stratify - if use_stratify: - val_class_counts = train_val_df["agency"].value_counts() - use_val_stratify = all(count >= 2 for count in val_class_counts) - val_stratify = train_val_df["agency"] if use_val_stratify else None - - if not use_val_stratify: - logger.warning( - "Warning: After first split, some classes have too few samples for stratified validation split." - ) - else: - val_stratify = None - - # Second split: train vs val (calculated from remaining data) - val_ratio = val_size / (1 - test_size) - - train_df, val_df = train_test_split( - train_val_df, - test_size=val_ratio, - random_state=random_state, - stratify=val_stratify, - ) - - logger.info( - f"Dataset splits: Train={len(train_df)}, Val={len(val_df)}, Test={len(test_df)}" - ) - - # Print class distribution in each split - logger.info("\nClass distribution:") - for split_name, split_df in [ - ("Train", train_df), - ("Val", val_df), - ("Test", test_df), - ]: - class_dist = split_df["agency"].value_counts().to_dict() - logger.info(f" {split_name}: {class_dist}") - - return train_df, val_df, test_df - - -def analyze_dataset(df, output_dir): - """ - Analyze the dataset and save visualizations. - - Args: - df: DataFrame with processed conversation data - output_dir: Directory to save analysis files - """ - os.makedirs(output_dir, exist_ok=True) - - # 1. 
Class distribution - plt.figure(figsize=(10, 6)) - sns.countplot(y="agency", data=df) - plt.title("Distribution of Agencies") - plt.xlabel("Count") - plt.ylabel("Agency") - plt.tight_layout() - plt.savefig(os.path.join(output_dir, "agency_distribution.png")) - plt.close() - - # 2. Number of turns distribution - plt.figure(figsize=(10, 6)) - sns.countplot(x="num_turns", data=df) - plt.title("Distribution of Conversation Length (Turns)") - plt.xlabel("Number of Turns") - plt.ylabel("Count") - plt.tight_layout() - plt.savefig(os.path.join(output_dir, "turns_distribution.png")) - plt.close() - - # 3. Text length distribution - df["text_length"] = df["text"].apply(len) - plt.figure(figsize=(10, 6)) - sns.histplot(data=df, x="text_length", bins=20) - plt.title("Distribution of Text Length") - plt.xlabel("Number of Characters") - plt.ylabel("Count") - plt.tight_layout() - plt.savefig(os.path.join(output_dir, "text_length_distribution.png")) - plt.close() - - # 4. Summary statistics as JSON - stats = { - "num_conversations": int(len(df)), - "conversations_per_agency": { - k: int(v) for k, v in df["agency"].value_counts().to_dict().items() - }, - "avg_turns": float(df["num_turns"].mean()), - "min_turns": int(df["num_turns"].min()), - "max_turns": int(df["num_turns"].max()), - "avg_text_length": float(df["text_length"].mean()), - "min_text_length": int(df["text_length"].min()), - "max_text_length": int(df["text_length"].max()), - } - - with open(os.path.join(output_dir, "dataset_stats.json"), "w") as f: - json.dump(stats, f, indent=2) - - # 5. Create a summary markdown file - with open(os.path.join(output_dir, "dataset_summary.md"), "w") as f: - f.write("# Synthetic Dataset Summary\n\n") - - f.write("## Dataset Statistics\n\n") - f.write(f"- Total conversations: {stats['num_conversations']}\n") - f.write(f"- Average turns per conversation: {stats['avg_turns']:.2f}\n") - f.write(f"- Average text length: {stats['avg_text_length']:.2f} characters\n\n") - - f.write("## Agency Distribution\n\n") - f.write("| Agency | Count | Percentage |\n") - f.write("|--------|-------|------------|\n") - - for agency, count in stats["conversations_per_agency"].items(): - percentage = (count / stats["num_conversations"]) * 100 - f.write(f"| {agency} | {count} | {percentage:.2f}% |\n") - - -def save_datasets(train_df, val_df, test_df, output_dir): - """ - Save the datasets to CSV files. 
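# A small round-trip sketch for the CSV splits save_datasets writes (the path
# is the script's default output_dir; purely illustrative):
import pandas as pd

train_df = pd.read_csv("data/processed/train.csv")  # columns: text, agency
print(train_df["agency"].value_counts())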
- - Args: - train_df, val_df, test_df: DataFrames for each split - output_dir: Directory to save the files - """ - os.makedirs(output_dir, exist_ok=True) - - # Ensure we only keep necessary columns - columns = ["text", "agency"] - - train_df[columns].to_csv(os.path.join(output_dir, "train.csv"), index=False) - val_df[columns].to_csv(os.path.join(output_dir, "val.csv"), index=False) - test_df[columns].to_csv(os.path.join(output_dir, "test.csv"), index=False) - - print(f"Datasets saved to {output_dir}") - - -def main(): - parser = argparse.ArgumentParser( - description="Prepare datasets for agency classification" - ) - parser.add_argument( - "--input_file", - type=str, - default="data/raw/synthetic_conversations.csv", - help="Path to the raw conversation data", - ) - parser.add_argument( - "--output_dir", - type=str, - default="data/processed", - help="Directory to save the processed datasets", - ) - parser.add_argument( - "--analysis_dir", - type=str, - default="data/analysis", - help="Directory to save dataset analysis", - ) - parser.add_argument( - "--test_size", - type=float, - default=0.2, - help="Proportion of data for the test set", - ) - parser.add_argument( - "--val_size", - type=float, - default=0.1, - help="Proportion of data for the validation set", - ) - parser.add_argument( - "--augment", - action="store_true", - help="Create augmented data for additional synthetic examples", - ) - parser.add_argument( - "--augment_factor", - type=int, - default=2, - help="How many augmented examples per original", - ) - parser.add_argument( - "--random_seed", type=int, default=42, help="Random seed for reproducibility" - ) - - args = parser.parse_args() - - # Set random seed - np.random.seed(args.random_seed) - - # Load and process data - raw_df = load_raw_data(args.input_file) - conversation_df = process_conversations(raw_df) - - # Analyze dataset - analyze_dataset(conversation_df, args.analysis_dir) - - # Split dataset - train_df, val_df, test_df = split_dataset( - conversation_df, - test_size=args.test_size, - val_size=args.val_size, - random_state=args.random_seed, - ) - - # Save datasets - save_datasets(train_df, val_df, test_df, args.output_dir) - - logger.info("Dataset preparation complete!") - - -if __name__ == "__main__": - main() diff --git a/src/training/scripts/evaluate.py b/src/training/scripts/evaluate.py deleted file mode 100644 index 5383e41d..00000000 --- a/src/training/scripts/evaluate.py +++ /dev/null @@ -1,611 +0,0 @@ -import os -import sys -import argparse -import json -import time -import torch -import numpy as np -from loguru import logger -import pandas as pd -from transformers import AutoModelForSequenceClassification, AutoTokenizer -from torch.utils.data import DataLoader -import mlflow -import psutil -import matplotlib.pyplot as plt -import seaborn as sns - -logger.remove() -# add stout handler -logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}") - - -# Import from our utils script -from utils import ( - set_random_seeds, - compute_metrics, - measure_inference_speed, - measure_model_size, -) - -# Import custom dataset class from train.py -from train import TextClassificationDataset - - -def load_model_and_tokenizer(model_path, device): - """ - Load a model and tokenizer from the specified path. 
- - Args: - model_path (str): Path to the saved model - device (torch.device): Device to load the model onto - - Returns: - tuple: (model, tokenizer) - """ - logger.info(f"Loading model from {model_path}") - model = AutoModelForSequenceClassification.from_pretrained(model_path) - tokenizer = AutoTokenizer.from_pretrained(model_path) - - model.to(device) - return model, tokenizer - - -def load_test_data(data_path, tokenizer, max_seq_length=128): - """ - Load and prepare test data. - """ - logger.info(f"Loading test data from {data_path}") - - if data_path.endswith(".csv"): - df = pd.read_csv(data_path) - elif data_path.endswith(".json"): - df = pd.read_json(data_path, lines=True) - else: - raise ValueError(f"Unsupported file format: {data_path}") - - text_col = next( - (col for col in ["text", "content", "sentence"] if col in df.columns), None - ) - label_col = next( - (col for col in ["label", "class", "target"] if col in df.columns), None - ) - - if text_col is None or label_col is None: - raise ValueError("Could not identify text and label columns in the data") - - texts = df[text_col].values - labels = df[label_col].values - - # Create dataset and dataloader - dataset = TextClassificationDataset(texts, labels, tokenizer, max_seq_length) - dataloader = DataLoader(dataset, batch_size=32, shuffle=False, num_workers=4) - - return dataloader, texts, labels - - -def evaluate_model(model, dataloader, device): - """ - Evaluate the model on the test data. - """ - logger.info("Evaluating model accuracy...") - model.eval() - all_preds = [] - all_labels = [] - all_probs = [] - - with torch.no_grad(): - for batch in dataloader: - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) - labels = batch["label"].to(device) - - outputs = model(input_ids=input_ids, attention_mask=attention_mask) - - logits = outputs.logits - probs = torch.nn.functional.softmax(logits, dim=1) - preds = torch.argmax(logits, dim=1) - - all_preds.extend(preds.cpu().numpy()) - all_labels.extend(labels.cpu().numpy()) - all_probs.extend(probs.cpu().numpy()) - - return np.array(all_preds), np.array(all_labels), np.array(all_probs) - - -def measure_inference_performance( - model, dataloader, device, num_runs=100, batch_sizes=None -): - """ - Measure inference performance metrics. 
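# NOTE (editor): compute_metrics is imported from utils.py at the top of this
# file but utils.py is not part of this hunk. The call sites unpack it as
# `metrics, _ = compute_metrics(...)`, so here is a sketch matching that shape,
# assuming scikit-learn; the real helper may differ.
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    precision_recall_fscore_support,
)

def compute_metrics_sketch(true_labels, predictions, probabilities=None):
    precision, recall, f1, _ = precision_recall_fscore_support(
        true_labels, predictions, average="macro", zero_division=0
    )
    cm = confusion_matrix(true_labels, predictions)
    metrics = {
        "accuracy": accuracy_score(true_labels, predictions),
        "precision": precision,
        "recall": recall,
        "f1": f1,
        # Kept under this key so the printing loops below can skip it.
        "confusion_matrix": cm.tolist(),
    }
    return metrics, cm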
- """ - logger.info("Measuring inference performance...") - performance_metrics = {} - - # Get a sample batch - sample_batch = next(iter(dataloader)) - - # Measure inference time for a single batch - inference_time = measure_inference_speed( - model, sample_batch, device, num_runs=num_runs, warm_up=10 - ) - - performance_metrics["avg_inference_time_seconds"] = inference_time - performance_metrics["avg_inference_time_ms"] = inference_time * 1000 - performance_metrics["samples_per_second"] = ( - sample_batch["input_ids"].shape[0] / inference_time - ) - - model_size_mb = measure_model_size(model) - performance_metrics["model_size_mb"] = model_size_mb - - memory_before = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024) # MB - - with torch.no_grad(): - _ = model( - input_ids=sample_batch["input_ids"].to(device), - attention_mask=sample_batch["attention_mask"].to(device), - ) - - memory_after = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024) # MB - performance_metrics["memory_usage_mb"] = memory_after - memory_before - - if batch_sizes and device == torch.device("cuda"): - batch_time_results = {} - for batch_size in batch_sizes: - batch = { - "input_ids": sample_batch["input_ids"][:1].repeat(batch_size, 1), - "attention_mask": sample_batch["attention_mask"][:1].repeat( - batch_size, 1 - ), - } - - # Measure time and average over 10 runs - start_time = time.time() - with torch.no_grad(): - for _ in range(10): - _ = model( - input_ids=batch["input_ids"].to(device), - attention_mask=batch["attention_mask"].to(device), - ) - end_time = time.time() - - batch_time = (end_time - start_time) / 10 - batch_time_results[batch_size] = batch_time - - performance_metrics["batch_size_benchmarks"] = batch_time_results - - return performance_metrics - - -def log_evaluation_results( - accuracy_metrics, performance_metrics, model_name, output_dir, run_id=None -): - """ - Log evaluation results to files and MLflow. 
- """ - os.makedirs(output_dir, exist_ok=True) - - all_metrics = { - "model_name": model_name, - "accuracy_metrics": accuracy_metrics, - "performance_metrics": performance_metrics, - "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"), - } - - metrics_path = os.path.join(output_dir, f"{model_name}_evaluation.json") - with open(metrics_path, "w") as f: - - def convert_for_json(obj): - if isinstance( - obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64) - ): - return int(obj) - elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)): - return float(obj) - elif isinstance(obj, (np.ndarray,)): - return obj.tolist() - elif isinstance(obj, dict): - return {k: convert_for_json(v) for k, v in obj.items()} - elif isinstance(obj, list): - return [convert_for_json(i) for i in obj] - else: - return obj - - json.dump(convert_for_json(all_metrics), f, indent=2) - - if run_id: - try: - mlflow.set_tracking_uri(os.environ.get("MLFLOW_TRACKING_URI", "mlruns")) - with mlflow.start_run(run_id=run_id): - # Log accuracy metrics - for k, v in accuracy_metrics.items(): - if k != "confusion_matrix" and not isinstance( - v, (dict, list, np.ndarray) - ): - mlflow.log_metric(f"test_{k}", v) - - # Log performance metrics - for k, v in performance_metrics.items(): - if not isinstance(v, (dict, list, np.ndarray)): - mlflow.log_metric(k, v) - - # Log the full results file - mlflow.log_artifact(metrics_path) - except Exception as e: - logger.warning(f"Warning: Failed to log to MLflow: {str(e)}") - - logger.info(f"Evaluation results saved to {metrics_path}") - - -def create_evaluation_plots( - predictions, true_labels, probabilities, model_name, output_dir -): - """ - Create evaluation plots and save them. - """ - os.makedirs(output_dir, exist_ok=True) - - cm = np.zeros((len(np.unique(true_labels)), len(np.unique(true_labels))), dtype=int) - for i, j in zip(true_labels, predictions): - cm[i][j] += 1 - - plt.figure(figsize=(10, 8)) - sns.heatmap( - cm, - annot=True, - fmt="d", - cmap="Blues", - xticklabels=[f"Class {i}" for i in range(cm.shape[0])], - yticklabels=[f"Class {i}" for i in range(cm.shape[0])], - ) - plt.xlabel("Predicted") - plt.ylabel("True") - plt.title(f"{model_name} Confusion Matrix") - plt.tight_layout() - - cm_path = os.path.join(output_dir, f"{model_name}_confusion_matrix.png") - plt.savefig(cm_path) - plt.close() - - # For binary classification, plot ROC curve - if probabilities.shape[1] == 2: - from sklearn.metrics import roc_curve, auc - - fpr, tpr, _ = roc_curve(true_labels, probabilities[:, 1]) - roc_auc = auc(fpr, tpr) - - plt.figure(figsize=(8, 6)) - plt.plot(fpr, tpr, lw=2, label=f"ROC curve (area = {roc_auc:.2f})") - plt.plot([0, 1], [0, 1], "k--", lw=2) - plt.xlim([0.0, 1.0]) - plt.ylim([0.0, 1.05]) - plt.xlabel("False Positive Rate") - plt.ylabel("True Positive Rate") - plt.title(f"{model_name} ROC Curve") - plt.legend(loc="lower right") - - roc_path = os.path.join(output_dir, f"{model_name}_roc_curve.png") - plt.savefig(roc_path) - plt.close() - - # Precision-Recall curve - from sklearn.metrics import precision_recall_curve, average_precision_score - - precision, recall, _ = precision_recall_curve(true_labels, probabilities[:, 1]) - avg_precision = average_precision_score(true_labels, probabilities[:, 1]) - - plt.figure(figsize=(8, 6)) - plt.plot(recall, precision, lw=2, label=f"PR curve (AP = {avg_precision:.2f})") - plt.xlim([0.0, 1.0]) - plt.ylim([0.0, 1.05]) - plt.xlabel("Recall") - plt.ylabel("Precision") - plt.title(f"{model_name} Precision-Recall Curve") - 
plt.legend(loc="lower left")
-
-        pr_path = os.path.join(output_dir, f"{model_name}_pr_curve.png")
-        plt.savefig(pr_path)
-        plt.close()
-
-    try:
-        mlflow.log_artifact(cm_path)
-        if probabilities.shape[1] == 2:
-            mlflow.log_artifact(roc_path)
-            mlflow.log_artifact(pr_path)
-    except Exception:
-        logger.warning("Failed to log plots to MLflow")
-
-
-def plot_performance_comparison(model_names, performance_metrics_list, output_dir):
-    """
-    Create performance comparison plots across models.
-    """
-    os.makedirs(output_dir, exist_ok=True)
-
-    metrics_to_plot = ["avg_inference_time_ms", "samples_per_second", "model_size_mb"]
-
-    for metric in metrics_to_plot:
-        values = [metrics.get(metric, 0) for metrics in performance_metrics_list]
-
-        plt.figure(figsize=(10, 6))
-        bars = plt.bar(model_names, values)
-        plt.title(f"Comparison of {metric}")
-        plt.ylabel(metric.replace("_", " ").title())
-        plt.xlabel("Model")
-
-        for bar, value in zip(bars, values):
-            if metric == "avg_inference_time_ms":
-                label = f"{value:.2f} ms"
-            elif metric == "samples_per_second":
-                label = f"{value:.1f}"
-            else:
-                label = f"{value:.1f} MB"
-
-            plt.text(
-                bar.get_x() + bar.get_width() / 2,
-                bar.get_height() + 0.1,
-                label,
-                ha="center",
-                va="bottom",
-            )
-
-        plt.xticks(rotation=45)
-        plt.tight_layout()
-
-        plot_path = os.path.join(output_dir, f"compare_{metric}.png")
-        plt.savefig(plot_path)
-        plt.close()
-
-        try:
-            mlflow.log_artifact(plot_path)
-        except Exception:
-            logger.warning("Failed to log comparison plots to MLflow")
-
-
-def evaluate_and_compare_models(model_paths, model_names, test_data_path, output_dir):
-    """
-    Evaluate and compare multiple models on the same test data.
-    """
-    os.makedirs(output_dir, exist_ok=True)
-
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    logger.info(f"Using device: {device}")
-
-    all_accuracy_metrics = []
-    all_performance_metrics = []
-
-    for model_path, model_name in zip(model_paths, model_names):
-        logger.info(f"\nEvaluating model: {model_name}")
-
-        # Load model and tokenizer
-        model, tokenizer = load_model_and_tokenizer(model_path, device)
-
-        # Load test data
-        dataloader, _, labels = load_test_data(test_data_path, tokenizer)
-
-        # Determine number of classes
-        num_labels = len(np.unique(labels))
-        logger.info(f"Detected {num_labels} classes")
-
-        # Evaluate model accuracy
-        predictions, true_labels, probabilities = evaluate_model(
-            model, dataloader, device
-        )
-
-        accuracy_metrics, _ = compute_metrics(true_labels, predictions, probabilities)
-        all_accuracy_metrics.append(accuracy_metrics)
-
-        model_output_dir = os.path.join(output_dir, model_name)
-        os.makedirs(model_output_dir, exist_ok=True)
-
-        create_evaluation_plots(
-            predictions, true_labels, probabilities, model_name, model_output_dir
-        )
-
-        # Measure inference performance
-        batch_sizes = (
-            [1, 2, 4, 8, 16, 32, 64] if device.type == "cuda" else None
-        )
-
-        performance_metrics = measure_inference_performance(
-            model, dataloader, device, batch_sizes=batch_sizes
-        )
-        all_performance_metrics.append(performance_metrics)
-
-        # Log results
-        logger.info(f"\nAccuracy Metrics for {model_name}:")
-        for k, v in accuracy_metrics.items():
-            if k != "confusion_matrix":
-                logger.info(f"  {k}: {v}")
-
-        logger.info(f"\nPerformance Metrics for {model_name}:")
-        for k, v in performance_metrics.items():
-            if not isinstance(v, dict):
-                logger.info(f"  {k}: {v}")
-
-        # Log to files
-        log_evaluation_results(
-            accuracy_metrics, performance_metrics, model_name, model_output_dir
-        )
-
plot_performance_comparison(model_names, all_performance_metrics, output_dir) - - comparison_data = [] - for i, (model_name, acc_metrics, perf_metrics) in enumerate( - zip(model_names, all_accuracy_metrics, all_performance_metrics) - ): - model_data = { - "model_name": model_name, - "accuracy": acc_metrics.get("accuracy", 0), - "precision": acc_metrics.get("precision", 0), - "recall": acc_metrics.get("recall", 0), - "f1": acc_metrics.get("f1", 0), - "avg_inference_time_ms": perf_metrics.get("avg_inference_time_ms", 0), - "samples_per_second": perf_metrics.get("samples_per_second", 0), - "model_size_mb": perf_metrics.get("model_size_mb", 0), - } - comparison_data.append(model_data) - - comparison_df = pd.DataFrame(comparison_data) - - # Save comparison table - comparison_path = os.path.join(output_dir, "model_comparison.csv") - comparison_df.to_csv(comparison_path, index=False) - - # Create summary table for markdown - markdown_table = "# Model Comparison\n\n" - markdown_table += "| Model | Accuracy | F1 Score | Precision | Recall | Inference Time | Samples/sec | Size (MB) |\n" - markdown_table += "|-------|----------|----------|-----------|--------|---------------|-------------|----------|\n" - - for _, row in comparison_df.iterrows(): - markdown_table += f"| {row['model_name']} | {row['accuracy']:.4f} | {row['f1']:.4f} | {row['precision']:.4f} | {row['recall']:.4f} | {row['avg_inference_time_ms']:.2f} ms | {row['samples_per_second']:.1f} | {row['model_size_mb']:.1f} |\n" - - markdown_path = os.path.join(output_dir, "model_comparison.md") - with open(markdown_path, "w") as f: - f.write(markdown_table) - - logger.info(f"\nComparison results saved to {output_dir}") - - # Log to MLflow if active - try: - mlflow.log_artifact(comparison_path) - mlflow.log_artifact(markdown_path) - except: - # MLflow might not be active - logger.warning("Warning: Failed to log comparison results to MLflow") - pass - - return comparison_df - - -def main(): - parser = argparse.ArgumentParser( - description="Evaluate transformer-based text classifiers" - ) - - # Single model evaluation - parser.add_argument("--model_path", type=str, help="Path to the model directory") - parser.add_argument("--model_name", type=str, help="Name of the model") - parser.add_argument("--test_data", type=str, help="Path to the test data") - parser.add_argument( - "--output_dir", - type=str, - default="evaluation_results", - help="Directory to save results", - ) - parser.add_argument( - "--mlflow_run_id", type=str, help="MLflow run ID to log results to" - ) - - # Multiple model comparison - parser.add_argument( - "--compare", action="store_true", help="Compare multiple models" - ) - parser.add_argument( - "--model_paths", - type=str, - nargs="+", - help="Paths to model directories for comparison", - ) - parser.add_argument( - "--model_names", type=str, nargs="+", help="Names of models for comparison" - ) - - # Performance evaluation options - parser.add_argument( - "--num_runs", - type=int, - default=100, - help="Number of inference runs to average over", - ) - parser.add_argument( - "--seed", type=int, default=42, help="Random seed for reproducibility" - ) - - args = parser.parse_args() - - # Set random seeds - set_random_seeds(args.seed) - - if args.compare: - # Validate arguments for comparison - if ( - not args.model_paths - or not args.model_names - or len(args.model_paths) != len(args.model_names) - ): - parser.error( - "For comparison, --model_paths and --model_names must be provided with equal length" - ) - - # Evaluate and 
compare models
-        evaluate_and_compare_models(
-            args.model_paths, args.model_names, args.test_data, args.output_dir
-        )
-    else:
-        # Validate arguments for single model evaluation
-        if not args.model_path or not args.model_name or not args.test_data:
-            parser.error(
-                "--model_path, --model_name, and --test_data are required for single model evaluation"
-            )
-
-        # Set up device
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-        # Load model and tokenizer
-        model, tokenizer = load_model_and_tokenizer(args.model_path, device)
-
-        # Load test data
-        dataloader, _, labels = load_test_data(args.test_data, tokenizer)
-
-        # Determine number of classes
-        num_labels = len(np.unique(labels))
-
-        # Create output directory
-        os.makedirs(args.output_dir, exist_ok=True)
-
-        # Evaluate model accuracy
-        predictions, true_labels, probabilities = evaluate_model(
-            model, dataloader, device
-        )
-
-        # Compute accuracy metrics
-        accuracy_metrics, _ = compute_metrics(true_labels, predictions, probabilities)
-
-        # Create evaluation plots
-        create_evaluation_plots(
-            predictions, true_labels, probabilities, args.model_name, args.output_dir
-        )
-
-        # Measure inference performance
-        performance_metrics = measure_inference_performance(
-            model, dataloader, device, num_runs=args.num_runs
-        )
-
-        # Log results
-        logger.info("\nAccuracy Metrics:")
-        for k, v in accuracy_metrics.items():
-            if k != "confusion_matrix":
-                logger.info(f"  {k}: {v}")
-
-        logger.info("\nPerformance Metrics:")
-        for k, v in performance_metrics.items():
-            if not isinstance(v, dict):
-                logger.info(f"  {k}: {v}")
-
-        # Log to files and MLflow
-        log_evaluation_results(
-            accuracy_metrics,
-            performance_metrics,
-            args.model_name,
-            args.output_dir,
-            args.mlflow_run_id,
-        )
-
-
-if __name__ == "__main__":
-    main()
diff --git a/src/training/scripts/inference.py b/src/training/scripts/inference.py
deleted file mode 100644
index 20813558..00000000
--- a/src/training/scripts/inference.py
+++ /dev/null
@@ -1,993 +0,0 @@
-import os
-import sys
-import argparse
-import json
-import time
-import torch
-import numpy as np
-import pandas as pd
-import matplotlib.pyplot as plt
-import seaborn as sns
-from loguru import logger
-import mlflow
-import psutil
-import platform
-import gc
-from transformers import (
-    AutoModelForSequenceClassification,
-    AutoTokenizer,
-)
-
-logger.remove()
-# add stdout handler
-logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}")
-
-
-# Import utilities
-from utils import set_random_seeds, measure_model_size
-
-
-def get_system_info():
-    """
-    Get information about the system hardware.
-
-    Returns:
-        dict: Dictionary of system information
-    """
-    system_info = {
-        "platform": platform.platform(),
-        "processor": platform.processor(),
-        "python_version": platform.python_version(),
-        "torch_version": torch.__version__,
-        "cuda_available": torch.cuda.is_available(),
-        "cpu_count": os.cpu_count(),
-        "memory_total_gb": psutil.virtual_memory().total / (1024**3),
-    }
-
-    if torch.cuda.is_available():
-        system_info.update(
-            {
-                "cuda_version": torch.version.cuda,
-                "gpu_name": torch.cuda.get_device_name(0),
-                "gpu_count": torch.cuda.device_count(),
-                "gpu_memory_gb": torch.cuda.get_device_properties(0).total_memory
-                / (1024**3),
-            }
-        )
-
-    return system_info
-
-
-def create_dummy_input(tokenizer, batch_size, seq_length, device):
-    """
-    Create a dummy input batch for benchmarking.
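# NOTE (editor): a minimal usage sketch for the benchmarking helpers defined in
# this file (create_dummy_input and benchmark_inference_time below). The
# checkpoint name is an illustrative public model, not a project artifact.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

def quick_benchmark_sketch(model_path="distilbert-base-uncased"):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AutoModelForSequenceClassification.from_pretrained(model_path).to(device)
    model.eval()
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    # Synthetic batch of 8 sequences, 128 tokens each.
    inputs = create_dummy_input(tokenizer, batch_size=8, seq_length=128, device=device)
    stats = benchmark_inference_time(model, inputs, num_runs=20, warm_up=5)
    print(
        f"avg latency: {stats['avg_time_ms']:.2f} ms, "
        f"throughput: {stats['samples_per_second']:.1f} samples/s"
    )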
- - Args: - tokenizer: The tokenizer to use - batch_size: Number of examples in the batch - seq_length: Sequence length for each example - device: Device to put the tensor on - - Returns: - dict: Dictionary with input_ids and attention_mask - """ - # Create a dummy text sequence - text = " ".join(["word"] * (seq_length // 2)) - - # Tokenize it - sample_encoding = tokenizer( - text, - padding="max_length", - truncation=True, - max_length=seq_length, - return_tensors="pt", - ) - - # Expand to the desired batch size - input_ids = sample_encoding["input_ids"].repeat(batch_size, 1).to(device) - attention_mask = sample_encoding["attention_mask"].repeat(batch_size, 1).to(device) - - return {"input_ids": input_ids, "attention_mask": attention_mask} - - -def benchmark_inference_time(model, inputs, num_runs=100, warm_up=10): - """ - Benchmark the inference time for a model. - - Args: - model: The model to benchmark - inputs: Input tensors for the model - num_runs: Number of runs to average over - warm_up: Number of warm-up runs - - Returns: - dict: Dictionary of timing statistics - """ - model.eval() - timings = [] - - # Warm-up runs - with torch.no_grad(): - for _ in range(warm_up): - _ = model(**inputs) - - # Timed runs - with torch.no_grad(): - for _ in range(num_runs): - start_time = time.time() - _ = model(**inputs) - # Make sure GPU operations are completed - if torch.cuda.is_available(): - torch.cuda.synchronize() - end_time = time.time() - timings.append(end_time - start_time) - - # Calculate statistics - timings_ms = np.array(timings) * 1000 - - stats = { - "avg_time_ms": np.mean(timings_ms), - "min_time_ms": np.min(timings_ms), - "max_time_ms": np.max(timings_ms), - "median_time_ms": np.median(timings_ms), - "std_time_ms": np.std(timings_ms), - "p90_time_ms": np.percentile(timings_ms, 90), - "p95_time_ms": np.percentile(timings_ms, 95), - "p99_time_ms": np.percentile(timings_ms, 99), - } - - # Calculate throughput (samples per second) - batch_size = inputs["input_ids"].shape[0] - stats["samples_per_second"] = batch_size * 1000 / stats["avg_time_ms"] - - return stats - - -def benchmark_memory_usage(model, inputs): - """ - Benchmark the memory usage for a model. 
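# NOTE (editor): benchmark_inference_time above uses time.time() with a
# torch.cuda.synchronize() per run, which is fine for coarse numbers. CUDA
# events record per-run GPU timings with a single host sync at the end; a
# sketch of that alternative (an editorial suggestion, not the original code).
# Requires a CUDA device and inputs already on the GPU.
import torch

def cuda_event_timing_sketch(model, inputs, num_runs=100):
    starts = [torch.cuda.Event(enable_timing=True) for _ in range(num_runs)]
    ends = [torch.cuda.Event(enable_timing=True) for _ in range(num_runs)]
    with torch.no_grad():
        for s, e in zip(starts, ends):
            s.record()
            model(**inputs)
            e.record()
    torch.cuda.synchronize()  # one sync at the end, then read every timing
    return [s.elapsed_time(e) for s, e in zip(starts, ends)]  # milliseconds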
- - Args: - model: The model to benchmark - inputs: Input tensors for the model - - Returns: - dict: Dictionary of memory usage statistics - """ - # Force garbage collection - gc.collect() - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Measure memory before - if torch.cuda.is_available(): - torch.cuda.reset_peak_memory_stats() - memory_before = torch.cuda.memory_allocated() / (1024**2) - else: - memory_before = psutil.Process(os.getpid()).memory_info().rss / (1024**2) - - # Run inference - model.eval() - with torch.no_grad(): - _ = model(**inputs) - # Make sure GPU operations are completed - if torch.cuda.is_available(): - torch.cuda.synchronize() - - # Measure memory after - if torch.cuda.is_available(): - memory_peak = torch.cuda.max_memory_allocated() / (1024**2) - memory_current = torch.cuda.memory_allocated() / (1024**2) - memory_usage = memory_peak - memory_before - else: - memory_current = psutil.Process(os.getpid()).memory_info().rss / (1024**2) - memory_usage = memory_current - memory_before - memory_peak = memory_current - - return { - "memory_before_mb": memory_before, - "memory_peak_mb": memory_peak, - "memory_current_mb": memory_current, - "memory_usage_mb": memory_usage, - } - - -def benchmark_model( - model_path, device, batch_sizes, seq_lengths, num_runs=100, warm_up=10 -): - """ - Run comprehensive benchmarks for a model. - """ - logger.info(f"Loading model from {model_path}...") - - # Load model and tokenizer - model = AutoModelForSequenceClassification.from_pretrained(model_path) - tokenizer = AutoTokenizer.from_pretrained(model_path) - - model.to(device) - model.eval() - - # Measure model size - model_size_mb = measure_model_size(model) - logger.info(f"Model size: {model_size_mb:.2f} MB") - - # Run benchmarks for different batch sizes and sequence lengths - results = { - "model_path": model_path, - "device": str(device), - "model_size_mb": model_size_mb, - "benchmarks": {}, - } - - for batch_size in batch_sizes: - for seq_length in seq_lengths: - logger.info( - f"Benchmarking batch_size={batch_size}, seq_length={seq_length}..." - ) - - # Create dummy inputs - inputs = create_dummy_input(tokenizer, batch_size, seq_length, device) - - # Benchmark timing - timing_stats = benchmark_inference_time( - model, inputs, num_runs=num_runs, warm_up=warm_up - ) - - # Benchmark memory - memory_stats = benchmark_memory_usage(model, inputs) - - # Store results - key = f"batch_{batch_size}_seq_{seq_length}" - results["benchmarks"][key] = { - "batch_size": batch_size, - "seq_length": seq_length, - "timing": timing_stats, - "memory": memory_stats, - } - - return results - - -def compare_batch_throughput(results, output_dir): - """ - Create comparison plots for throughput vs batch size. 
- - Args: - results: Dictionary of benchmark results - output_dir: Directory to save plots - """ - os.makedirs(output_dir, exist_ok=True) - - # Extract batch sizes and sequence lengths - batch_sizes = sorted( - list({result["batch_size"] for result in results["benchmarks"].values()}) - ) - seq_lengths = sorted( - list({result["seq_length"] for result in results["benchmarks"].values()}) - ) - - # Create plots for each sequence length - for seq_length in seq_lengths: - data = [] - - for batch_size in batch_sizes: - key = f"batch_{batch_size}_seq_{seq_length}" - if key in results["benchmarks"]: - throughput = results["benchmarks"][key]["timing"]["samples_per_second"] - latency = results["benchmarks"][key]["timing"]["avg_time_ms"] - data.append( - { - "batch_size": batch_size, - "throughput": throughput, - "latency": latency, - } - ) - - if not data: - continue - - df = pd.DataFrame(data) - - # Create throughput plot - plt.figure(figsize=(10, 6)) - plt.plot(df["batch_size"], df["throughput"], marker="o") - plt.xlabel("Batch Size") - plt.ylabel("Throughput (samples/second)") - plt.title(f"Throughput vs Batch Size (Sequence Length = {seq_length})") - plt.grid(True) - plt.tight_layout() - - throughput_path = os.path.join(output_dir, f"throughput_seq_{seq_length}.png") - plt.savefig(throughput_path) - plt.close() - - # Create latency plot - plt.figure(figsize=(10, 6)) - plt.plot(df["batch_size"], df["latency"], marker="o") - plt.xlabel("Batch Size") - plt.ylabel("Latency (ms)") - plt.title(f"Latency vs Batch Size (Sequence Length = {seq_length})") - plt.grid(True) - plt.tight_layout() - - latency_path = os.path.join(output_dir, f"latency_seq_{seq_length}.png") - plt.savefig(latency_path) - plt.close() - - # Log to MLflow if active - try: - mlflow.log_artifact(throughput_path) - mlflow.log_artifact(latency_path) - except: - # MLflow might not be active - pass - - -def compare_sequence_length_impact(results, output_dir): - """ - Create comparison plots for latency vs sequence length. - - Args: - results: Dictionary of benchmark results - output_dir: Directory to save plots - """ - os.makedirs(output_dir, exist_ok=True) - - # Extract batch sizes and sequence lengths - batch_sizes = sorted( - list({result["batch_size"] for result in results["benchmarks"].values()}) - ) - seq_lengths = sorted( - list({result["seq_length"] for result in results["benchmarks"].values()}) - ) - - # Create plots for each batch size - for batch_size in batch_sizes: - data = [] - - for seq_length in seq_lengths: - key = f"batch_{batch_size}_seq_{seq_length}" - if key in results["benchmarks"]: - latency = results["benchmarks"][key]["timing"]["avg_time_ms"] - data.append({"seq_length": seq_length, "latency": latency}) - - if not data: - continue - - df = pd.DataFrame(data) - - # Create latency plot - plt.figure(figsize=(10, 6)) - plt.plot(df["seq_length"], df["latency"], marker="o") - plt.xlabel("Sequence Length") - plt.ylabel("Latency (ms)") - plt.title(f"Latency vs Sequence Length (Batch Size = {batch_size})") - plt.grid(True) - plt.tight_layout() - - latency_path = os.path.join(output_dir, f"seq_latency_batch_{batch_size}.png") - plt.savefig(latency_path) - plt.close() - - # Log to MLflow if active - try: - mlflow.log_artifact(latency_path) - except: - # MLflow might not be active - logger.warning( - f"Warning: Failed to log latency plot for batch size {batch_size} to MLflow" - ) - pass - - -def create_heatmap(results, output_dir): - """ - Create a heatmap of latency across batch sizes and sequence lengths. 
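# NOTE (editor): create_heatmap() below fills its matrices with nested loops
# over the "batch_{b}_seq_{s}" keys. An equivalent pandas-based sketch for the
# same results layout, shown only as a compact alternative; not part of the
# original file.
import pandas as pd

def latency_frame_sketch(results):
    rows = [
        {
            "batch_size": b["batch_size"],
            "seq_length": b["seq_length"],
            "latency_ms": b["timing"]["avg_time_ms"],
        }
        for b in results["benchmarks"].values()
    ]
    # pivot() yields batch sizes as rows and sequence lengths as columns,
    # ready to pass straight to sns.heatmap().
    return pd.DataFrame(rows).pivot(
        index="batch_size", columns="seq_length", values="latency_ms"
    )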
- - Args: - results: Dictionary of benchmark results - output_dir: Directory to save plots - """ - os.makedirs(output_dir, exist_ok=True) - - # Extract batch sizes and sequence lengths - batch_sizes = sorted( - list({result["batch_size"] for result in results["benchmarks"].values()}) - ) - seq_lengths = sorted( - list({result["seq_length"] for result in results["benchmarks"].values()}) - ) - - # Create data for heatmap - latency_matrix = np.zeros((len(batch_sizes), len(seq_lengths))) - throughput_matrix = np.zeros((len(batch_sizes), len(seq_lengths))) - - for i, batch_size in enumerate(batch_sizes): - for j, seq_length in enumerate(seq_lengths): - key = f"batch_{batch_size}_seq_{seq_length}" - if key in results["benchmarks"]: - latency_matrix[i, j] = results["benchmarks"][key]["timing"][ - "avg_time_ms" - ] - throughput_matrix[i, j] = results["benchmarks"][key]["timing"][ - "samples_per_second" - ] - - # Create latency heatmap - plt.figure(figsize=(12, 8)) - sns.heatmap( - latency_matrix, - annot=True, - fmt=".1f", - xticklabels=seq_lengths, - yticklabels=batch_sizes, - cmap="YlOrRd", - ) - plt.xlabel("Sequence Length") - plt.ylabel("Batch Size") - plt.title("Inference Latency (ms)") - plt.tight_layout() - - latency_heatmap_path = os.path.join(output_dir, "latency_heatmap.png") - plt.savefig(latency_heatmap_path) - plt.close() - - # Create throughput heatmap - plt.figure(figsize=(12, 8)) - sns.heatmap( - throughput_matrix, - annot=True, - fmt=".1f", - xticklabels=seq_lengths, - yticklabels=batch_sizes, - cmap="viridis", - ) - plt.xlabel("Sequence Length") - plt.ylabel("Batch Size") - plt.title("Throughput (samples/second)") - plt.tight_layout() - - throughput_heatmap_path = os.path.join(output_dir, "throughput_heatmap.png") - plt.savefig(throughput_heatmap_path) - plt.close() - - # Log to MLflow if active - try: - mlflow.log_artifact(latency_heatmap_path) - mlflow.log_artifact(throughput_heatmap_path) - except: - # MLflow might not be active - logger.warning("Warning: Failed to log heatmap plots to MLflow") - pass - - -def create_summary_report(results, system_info, output_dir): - """ - Create a markdown summary report of the benchmarks. 
- - Args: - results: Dictionary of benchmark results - system_info: Dictionary of system information - output_dir: Directory to save the report - """ - os.makedirs(output_dir, exist_ok=True) - - model_path = results["model_path"] - model_name = os.path.basename(model_path) - - # Create report content - report = f"# Inference Performance Report for {model_name}\n\n" - - # System information - report += "## System Information\n\n" - report += "| Component | Details |\n" - report += "|-----------|--------|\n" - for key, value in system_info.items(): - if isinstance(value, float): - value = f"{value:.2f}" - report += f"| {key} | {value} |\n" - - report += "\n## Model Information\n\n" - report += f"- Model path: {model_path}\n" - report += f"- Model size: {results['model_size_mb']:.2f} MB\n" - report += f"- Device: {results['device']}\n\n" - - # Create a summary table for batch size = 1 - report += "## Inference Latency (Batch Size = 1)\n\n" - report += "| Sequence Length | Latency (ms) | Throughput (samples/sec) |\n" - report += "|-----------------|-------------|---------------------------|\n" - - seq_lengths = sorted( - list({result["seq_length"] for result in results["benchmarks"].values()}) - ) - - for seq_length in seq_lengths: - key = f"batch_1_seq_{seq_length}" - if key in results["benchmarks"]: - latency = results["benchmarks"][key]["timing"]["avg_time_ms"] - throughput = results["benchmarks"][key]["timing"]["samples_per_second"] - report += f"| {seq_length} | {latency:.2f} | {throughput:.2f} |\n" - - # Create a table for batch effects on typical length - mid_seq = seq_lengths[len(seq_lengths) // 2] if seq_lengths else 128 - - report += f"\n## Batch Size Impact (Sequence Length = {mid_seq})\n\n" - report += "| Batch Size | Latency (ms) | Throughput (samples/sec) |\n" - report += "|------------|-------------|---------------------------|\n" - - batch_sizes = sorted( - list({result["batch_size"] for result in results["benchmarks"].values()}) - ) - - for batch_size in batch_sizes: - key = f"batch_{batch_size}_seq_{mid_seq}" - if key in results["benchmarks"]: - latency = results["benchmarks"][key]["timing"]["avg_time_ms"] - throughput = results["benchmarks"][key]["timing"]["samples_per_second"] - report += f"| {batch_size} | {latency:.2f} | {throughput:.2f} |\n" - - # Memory usage - report += "\n## Memory Usage\n\n" - report += "| Batch Size | Sequence Length | Memory Usage (MB) |\n" - report += "|------------|-----------------|-------------------|\n" - - for batch_size in batch_sizes: - for seq_length in seq_lengths: - key = f"batch_{batch_size}_seq_{seq_length}" - if key in results["benchmarks"]: - memory = results["benchmarks"][key]["memory"]["memory_usage_mb"] - report += f"| {batch_size} | {seq_length} | {memory:.2f} |\n" - - # Detailed timing statistics for a typical case - key = f"batch_1_seq_{mid_seq}" - if key in results["benchmarks"]: - report += ( - "\n## Detailed Timing Statistics (Batch Size = 1, Sequence Length = " - + str(mid_seq) - + ")\n\n" - ) - report += "| Metric | Value |\n" - report += "|--------|-------|\n" - - timing = results["benchmarks"][key]["timing"] - for k, v in timing.items(): - if k != "samples_per_second": - report += f"| {k} | {v:.2f} ms |\n" - - # Write the report - report_path = os.path.join(output_dir, f"{model_name}_inference_report.md") - with open(report_path, "w") as f: - f.write(report) - - logger.info(f"Report saved to {report_path}") - - # Log to MLflow if active - try: - mlflow.log_artifact(report_path) - except: - # MLflow might not be active - 
logger.warning("Warning: Failed to log report to MLflow") - pass - - -def compare_models(model_results, output_dir): - """ - Compare multiple models in terms of inference performance. - - Args: - model_results: List of benchmark results for different models - output_dir: Directory to save comparison results - """ - os.makedirs(output_dir, exist_ok=True) - - model_names = [os.path.basename(results["model_path"]) for results in model_results] - - # Extract batch sizes and sequence lengths - batch_sizes = set() - seq_lengths = set() - - for results in model_results: - for result in results["benchmarks"].values(): - batch_sizes.add(result["batch_size"]) - seq_lengths.add(result["seq_length"]) - - batch_sizes = sorted(list(batch_sizes)) - seq_lengths = sorted(list(seq_lengths)) - - # Compare latency for batch size = 1 across different sequence lengths - data = [] - - for model_idx, results in enumerate(model_results): - for seq_length in seq_lengths: - key = f"batch_1_seq_{seq_length}" - if key in results["benchmarks"]: - latency = results["benchmarks"][key]["timing"]["avg_time_ms"] - throughput = results["benchmarks"][key]["timing"]["samples_per_second"] - - data.append( - { - "model": model_names[model_idx], - "seq_length": seq_length, - "latency": latency, - "throughput": throughput, - } - ) - - if data: - df = pd.DataFrame(data) - - # Plot latency comparison - plt.figure(figsize=(12, 6)) - sns.lineplot(data=df, x="seq_length", y="latency", hue="model", marker="o") - plt.xlabel("Sequence Length") - plt.ylabel("Latency (ms)") - plt.title("Inference Latency Comparison (Batch Size = 1)") - plt.grid(True) - plt.tight_layout() - - latency_comp_path = os.path.join(output_dir, "model_latency_comparison.png") - plt.savefig(latency_comp_path) - plt.close() - - # Plot throughput comparison - plt.figure(figsize=(12, 6)) - sns.lineplot(data=df, x="seq_length", y="throughput", hue="model", marker="o") - plt.xlabel("Sequence Length") - plt.ylabel("Throughput (samples/second)") - plt.title("Inference Throughput Comparison (Batch Size = 1)") - plt.grid(True) - plt.tight_layout() - - throughput_comp_path = os.path.join( - output_dir, "model_throughput_comparison.png" - ) - plt.savefig(throughput_comp_path) - plt.close() - - # Compare latency for different batch sizes with a fixed sequence length - mid_seq = seq_lengths[len(seq_lengths) // 2] if seq_lengths else 128 - data = [] - - for model_idx, results in enumerate(model_results): - for batch_size in batch_sizes: - key = f"batch_{batch_size}_seq_{mid_seq}" - if key in results["benchmarks"]: - latency = results["benchmarks"][key]["timing"]["avg_time_ms"] - throughput = results["benchmarks"][key]["timing"]["samples_per_second"] - - data.append( - { - "model": model_names[model_idx], - "batch_size": batch_size, - "latency": latency, - "throughput": throughput, - } - ) - - if data: - df = pd.DataFrame(data) - - # Plot latency comparison for different batch sizes - plt.figure(figsize=(12, 6)) - sns.lineplot(data=df, x="batch_size", y="latency", hue="model", marker="o") - plt.xlabel("Batch Size") - plt.ylabel("Latency (ms)") - plt.title(f"Inference Latency Comparison (Sequence Length = {mid_seq})") - plt.grid(True) - plt.tight_layout() - - batch_latency_path = os.path.join(output_dir, "batch_latency_comparison.png") - plt.savefig(batch_latency_path) - plt.close() - - # Plot throughput comparison for different batch sizes - plt.figure(figsize=(12, 6)) - sns.lineplot(data=df, x="batch_size", y="throughput", hue="model", marker="o") - plt.xlabel("Batch Size") - 
plt.ylabel("Throughput (samples/second)") - plt.title(f"Inference Throughput Comparison (Sequence Length = {mid_seq})") - plt.grid(True) - plt.tight_layout() - - batch_throughput_path = os.path.join( - output_dir, "batch_throughput_comparison.png" - ) - plt.savefig(batch_throughput_path) - plt.close() - - # Create a comparison table - comparison_table = "# Model Inference Performance Comparison\n\n" - - # Model size comparison - comparison_table += "## Model Size\n\n" - comparison_table += "| Model | Size (MB) |\n" - comparison_table += "|-------|----------|\n" - - for model_idx, results in enumerate(model_results): - comparison_table += ( - f"| {model_names[model_idx]} | {results['model_size_mb']:.2f} |\n" - ) - - # Latency comparison for batch=1, seq=128 - comparison_table += "\n## Single Sample Inference Latency (ms)\n\n" - comparison_table += "| Model | " - for seq_length in seq_lengths: - comparison_table += f"Seq={seq_length} | " - comparison_table += "\n|-------|" - for _ in seq_lengths: - comparison_table += "-------|" - comparison_table += "\n" - - for model_idx, results in enumerate(model_results): - comparison_table += f"| {model_names[model_idx]} | " - for seq_length in seq_lengths: - key = f"batch_1_seq_{seq_length}" - if key in results["benchmarks"]: - latency = results["benchmarks"][key]["timing"]["avg_time_ms"] - comparison_table += f"{latency:.2f} | " - else: - comparison_table += "N/A | " - comparison_table += "\n" - - # Throughput comparison for batch=32, seq=128 - comparison_table += "\n## Throughput (samples/second) with Batch Size = 32\n\n" - comparison_table += "| Model | " - for seq_length in seq_lengths: - comparison_table += f"Seq={seq_length} | " - comparison_table += "\n|-------|" - for _ in seq_lengths: - comparison_table += "-------|" - comparison_table += "\n" - - for model_idx, results in enumerate(model_results): - comparison_table += f"| {model_names[model_idx]} | " - for seq_length in seq_lengths: - key = f"batch_32_seq_{seq_length}" - if key in results["benchmarks"]: - throughput = results["benchmarks"][key]["timing"]["samples_per_second"] - comparison_table += f"{throughput:.2f} | " - else: - comparison_table += "N/A | " - comparison_table += "\n" - - # Write the comparison table - comparison_path = os.path.join(output_dir, "model_comparison.md") - with open(comparison_path, "w") as f: - f.write(comparison_table) - - logger.info(f"Comparison saved to {comparison_path}") - - # Log to MLflow if active - try: - mlflow.log_artifact(latency_comp_path) - mlflow.log_artifact(throughput_comp_path) - mlflow.log_artifact(batch_latency_path) - mlflow.log_artifact(batch_throughput_path) - mlflow.log_artifact(comparison_path) - except: - # MLflow might not be active - logger.warning("Warning: Failed to log comparison plots to MLflow") - pass - - -def main(): - parser = argparse.ArgumentParser( - description="Measure inference performance for transformer models" - ) - - # Single model benchmark - parser.add_argument("--model_path", type=str, help="Path to the model") - parser.add_argument( - "--output_dir", - type=str, - default="inference_results", - help="Directory to save results", - ) - parser.add_argument( - "--mlflow_run_id", type=str, help="MLflow run ID to log results to" - ) - - # Multiple model comparison - parser.add_argument( - "--compare", action="store_true", help="Compare multiple models" - ) - parser.add_argument( - "--model_paths", type=str, nargs="+", help="Paths to models for comparison" - ) - - # Benchmark parameters - parser.add_argument( - 
"--batch_sizes", - type=int, - nargs="+", - default=[1, 2, 4, 8, 16, 32, 64, 128], - help="Batch sizes to benchmark", - ) - parser.add_argument( - "--seq_lengths", - type=int, - nargs="+", - default=[32, 64, 128, 256, 512], - help="Sequence lengths to benchmark", - ) - parser.add_argument( - "--num_runs", type=int, default=100, help="Number of runs to average over" - ) - parser.add_argument( - "--warm_up", type=int, default=10, help="Number of warm-up runs" - ) - parser.add_argument( - "--seed", type=int, default=42, help="Random seed for reproducibility" - ) - parser.add_argument( - "--cpu_only", - action="store_true", - help="Force CPU inference even if CUDA is available", - ) - - args = parser.parse_args() - - # Set random seeds - set_random_seeds(args.seed) - - # Set up device - if args.cpu_only: - device = torch.device("cpu") - else: - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - logger.info(f"Using device: {device}") - - # Get system information - system_info = get_system_info() - logger.info("System information:") - for key, value in system_info.items(): - logger.info(f" {key}: {value}") - - if args.compare: - # Validate arguments for comparison - if not args.model_paths: - parser.error("--model_paths must be provided for comparison") - - # Benchmark multiple models - model_results = [] - - for model_path in args.model_paths: - logger.info(f"\nBenchmarking model: {model_path}") - model_output_dir = os.path.join( - args.output_dir, os.path.basename(model_path) - ) - os.makedirs(model_output_dir, exist_ok=True) - - results = benchmark_model( - model_path, - device, - args.batch_sizes, - args.seq_lengths, - args.num_runs, - args.warm_up, - ) - - # Save results to file - results_path = os.path.join(model_output_dir, "benchmark_results.json") - with open(results_path, "w") as f: - # Convert any non-serializable values - def convert_for_json(obj): - if isinstance( - obj, - ( - np.int_, - np.intc, - np.intp, - np.int8, - np.int16, - np.int32, - np.int64, - ), - ): - return int(obj) - elif isinstance( - obj, (np.float_, np.float16, np.float32, np.float64) - ): - return float(obj) - elif isinstance(obj, (np.ndarray,)): - return obj.tolist() - elif isinstance(obj, dict): - return {k: convert_for_json(v) for k, v in obj.items()} - elif isinstance(obj, list): - return [convert_for_json(i) for i in obj] - else: - return obj - - json.dump(convert_for_json(results), f, indent=2) - - # Create individual model plots - compare_batch_throughput(results, model_output_dir) - compare_sequence_length_impact(results, model_output_dir) - create_heatmap(results, model_output_dir) - - # Create summary report - create_summary_report(results, system_info, model_output_dir) - - model_results.append(results) - - # Compare models - compare_models(model_results, args.output_dir) - - else: - # Validate arguments for single model - if not args.model_path: - parser.error("--model_path is required for single model benchmarking") - - # Benchmark single model - results = benchmark_model( - args.model_path, - device, - args.batch_sizes, - args.seq_lengths, - args.num_runs, - args.warm_up, - ) - - # Save results to file - os.makedirs(args.output_dir, exist_ok=True) - results_path = os.path.join(args.output_dir, "benchmark_results.json") - with open(results_path, "w") as f: - # Convert any non-serializable values - def convert_for_json(obj): - if isinstance( - obj, - (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64), - ): - return int(obj) - elif isinstance(obj, (np.float_, 
np.float16, np.float32, np.float64)): - return float(obj) - elif isinstance(obj, (np.ndarray,)): - return obj.tolist() - elif isinstance(obj, dict): - return {k: convert_for_json(v) for k, v in obj.items()} - elif isinstance(obj, list): - return [convert_for_json(i) for i in obj] - else: - return obj - - json.dump(convert_for_json(results), f, indent=2) - - # Create plots - compare_batch_throughput(results, args.output_dir) - compare_sequence_length_impact(results, args.output_dir) - create_heatmap(results, args.output_dir) - - # Create summary report - create_summary_report(results, system_info, args.output_dir) - - # Log to MLflow if run ID is provided - if args.mlflow_run_id: - try: - mlflow.set_tracking_uri(os.environ.get("MLFLOW_TRACKING_URI", "mlruns")) - with mlflow.start_run(run_id=args.mlflow_run_id): - # Log the benchmark results file - mlflow.log_artifact(results_path) - - # Log key metrics - key = f"batch_1_seq_128" - if key in results["benchmarks"]: - mlflow.log_metric( - "inference_latency_ms", - results["benchmarks"][key]["timing"]["avg_time_ms"], - ) - mlflow.log_metric( - "inference_throughput", - results["benchmarks"][key]["timing"]["samples_per_second"], - ) - - mlflow.log_metric("model_size_mb", results["model_size_mb"]) - - # Log system info - mlflow.log_dict(system_info, "system_info.json") - except Exception as e: - logger.warning(f"Warning: Failed to log to MLflow: {str(e)}") - - -if __name__ == "__main__": - main() diff --git a/src/training/scripts/mlflow_log.py b/src/training/scripts/mlflow_log.py deleted file mode 100644 index 828bd6db..00000000 --- a/src/training/scripts/mlflow_log.py +++ /dev/null @@ -1,763 +0,0 @@ -import os -import argparse -import json -import glob -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import seaborn as sns -from datetime import datetime -import mlflow -from mlflow.tracking import MlflowClient -from pathlib import Path - - -def setup_mlflow(tracking_uri, experiment_name): - """ - Set up MLflow tracking and create or get the experiment. - - Args: - tracking_uri (str): URI for MLflow tracking server - experiment_name (str): Name of the experiment - - Returns: - str: Experiment ID - """ - mlflow.set_tracking_uri(tracking_uri) - - # Get or create the experiment - experiment = mlflow.get_experiment_by_name(experiment_name) - if experiment: - experiment_id = experiment.experiment_id - else: - experiment_id = mlflow.create_experiment(experiment_name) - - print(f"MLflow experiment '{experiment_name}' (ID: {experiment_id}) is ready") - return experiment_id - - -def log_model_training(model_dir, metrics_file, artifacts_dir=None, tags=None): - """ - Log model training metrics and artifacts to MLflow. 
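# NOTE (editor): a minimal usage sketch chaining the helpers in this file,
# setup_mlflow() above and log_model_training() below. The paths and tag values
# are illustrative placeholders, not real project files.
def example_logging_flow_sketch():
    experiment_id = setup_mlflow("mlruns", "text_classification")
    run_id = log_model_training(
        model_dir="models/distilbert",
        metrics_file="models/distilbert/metrics.json",
        tags={"dataset": "synthetic_conversations"},
    )
    return experiment_id, run_id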
- - Args: - model_dir (str): Directory containing the model files - metrics_file (str): Path to the metrics JSON file - artifacts_dir (str, optional): Directory containing additional artifacts - tags (dict, optional): Tags to add to the run - - Returns: - str: MLflow run ID - """ - # Load metrics - with open(metrics_file, "r") as f: - metrics_data = json.load(f) - - model_type = metrics_data.get("model_type", os.path.basename(model_dir)) - - # Start MLflow run - with mlflow.start_run( - run_name=f"{model_type}_training_{datetime.now().strftime('%Y%m%d_%H%M%S')}" - ) as run: - run_id = run.info.run_id - - # Log parameters - params = { - k: v - for k, v in metrics_data.items() - if not isinstance(v, (dict, list)) and k != "timestamp" - } - mlflow.log_params(params) - - # Log metrics - if "test_metrics" in metrics_data: - for k, v in metrics_data["test_metrics"].items(): - if not isinstance(v, (dict, list, np.ndarray)): - mlflow.log_metric(f"test_{k}", v) - - # Log training time if available - if "training_time_seconds" in metrics_data: - mlflow.log_metric( - "training_time_seconds", metrics_data["training_time_seconds"] - ) - - # Log inference time if available - if "avg_inference_time_seconds" in metrics_data: - mlflow.log_metric( - "avg_inference_time_seconds", metrics_data["avg_inference_time_seconds"] - ) - - # Log the model if available - model_files = glob.glob(os.path.join(model_dir, "*.bin")) + glob.glob( - os.path.join(model_dir, "*.pt") - ) - if model_files: - mlflow.log_artifact(model_dir, "model") - - # Log the metrics file itself - mlflow.log_artifact(metrics_file, "metrics") - - # Log additional artifacts if provided - if artifacts_dir and os.path.exists(artifacts_dir): - for artifact_file in glob.glob(os.path.join(artifacts_dir, "*")): - if os.path.isfile(artifact_file): - mlflow.log_artifact(artifact_file, "artifacts") - - # Add tags if provided - if tags: - mlflow.set_tags(tags) - - # Add default tags - mlflow.set_tag("model_type", model_type) - mlflow.set_tag("stage", "training") - - print(f"Training metrics logged to MLflow run: {run_id}") - return run_id - - -def log_model_evaluation(evaluation_file, run_id=None, artifacts_dir=None, tags=None): - """ - Log model evaluation metrics to MLflow. - - Args: - evaluation_file (str): Path to the evaluation results JSON file - run_id (str, optional): Existing MLflow run ID to log to - artifacts_dir (str, optional): Directory containing additional artifacts - tags (dict, optional): Tags to add to the run - - Returns: - str: MLflow run ID - """ - # Load evaluation results - with open(evaluation_file, "r") as f: - eval_data = json.load(f) - - model_name = eval_data.get( - "model_name", os.path.basename(os.path.dirname(evaluation_file)) - ) - - # Determine whether to create a new run or use an existing one - if run_id: - # Check if the run exists - try: - client = MlflowClient() - client.get_run(run_id) - new_run = False - except: - print(f"Run ID {run_id} not found. 
Creating a new run.") - new_run = True - else: - new_run = True - - if new_run: - # Start a new MLflow run - with mlflow.start_run( - run_name=f"{model_name}_evaluation_{datetime.now().strftime('%Y%m%d_%H%M%S')}" - ) as run: - run_id = run.info.run_id - - # Log accuracy metrics - if "accuracy_metrics" in eval_data: - for k, v in eval_data["accuracy_metrics"].items(): - if not isinstance(v, (dict, list, np.ndarray)): - mlflow.log_metric(f"eval_{k}", v) - - # Log performance metrics - if "performance_metrics" in eval_data: - for k, v in eval_data["performance_metrics"].items(): - if not isinstance(v, (dict, list, np.ndarray)): - mlflow.log_metric(k, v) - - # Log the evaluation file itself - mlflow.log_artifact(evaluation_file, "evaluation") - - # Log additional artifacts if provided - if artifacts_dir and os.path.exists(artifacts_dir): - for artifact_file in glob.glob(os.path.join(artifacts_dir, "*")): - if os.path.isfile(artifact_file): - mlflow.log_artifact(artifact_file, "artifacts") - - # Add tags if provided - if tags: - mlflow.set_tags(tags) - - # Add default tags - mlflow.set_tag("model_name", model_name) - mlflow.set_tag("stage", "evaluation") - else: - # Log to existing run - with mlflow.start_run(run_id=run_id): - # Log accuracy metrics - if "accuracy_metrics" in eval_data: - for k, v in eval_data["accuracy_metrics"].items(): - if not isinstance(v, (dict, list, np.ndarray)): - mlflow.log_metric(f"eval_{k}", v) - - # Log performance metrics - if "performance_metrics" in eval_data: - for k, v in eval_data["performance_metrics"].items(): - if not isinstance(v, (dict, list, np.ndarray)): - mlflow.log_metric(k, v) - - # Log the evaluation file itself - mlflow.log_artifact(evaluation_file, "evaluation") - - # Log additional artifacts if provided - if artifacts_dir and os.path.exists(artifacts_dir): - for artifact_file in glob.glob(os.path.join(artifacts_dir, "*")): - if os.path.isfile(artifact_file): - mlflow.log_artifact(artifact_file, "artifacts") - - # Add tags if provided - if tags: - mlflow.set_tags(tags) - - # Update stage tag - mlflow.set_tag("stage", "training+evaluation") - - print(f"Evaluation metrics logged to MLflow run: {run_id}") - return run_id - - -def log_inference_performance( - inference_file, run_id=None, artifacts_dir=None, tags=None -): - """ - Log inference performance metrics to MLflow. - - Args: - inference_file (str): Path to the inference results JSON file - run_id (str, optional): Existing MLflow run ID to log to - artifacts_dir (str, optional): Directory containing additional artifacts - tags (dict, optional): Tags to add to the run - - Returns: - str: MLflow run ID - """ - # Load inference results - with open(inference_file, "r") as f: - inference_data = json.load(f) - - model_name = os.path.basename( - inference_data.get("model_path", os.path.dirname(inference_file)) - ) - - # Determine whether to create a new run or use an existing one - if run_id: - # Check if the run exists - try: - client = MlflowClient() - client.get_run(run_id) - new_run = False - except: - print(f"Run ID {run_id} not found. 
Creating a new run.") - new_run = True - else: - new_run = True - - if new_run: - # Start a new MLflow run - with mlflow.start_run( - run_name=f"{model_name}_inference_{datetime.now().strftime('%Y%m%d_%H%M%S')}" - ) as run: - run_id = run.info.run_id - - # Log model size - if "model_size_mb" in inference_data: - mlflow.log_metric("model_size_mb", inference_data["model_size_mb"]) - - # Log batch size = 1, seq_length = 128 metrics as standard benchmarks - key = "batch_1_seq_128" - if "benchmarks" in inference_data and key in inference_data["benchmarks"]: - timing = inference_data["benchmarks"][key]["timing"] - mlflow.log_metric("inference_latency_ms", timing["avg_time_ms"]) - mlflow.log_metric("inference_throughput", timing["samples_per_second"]) - - memory = inference_data["benchmarks"][key]["memory"] - mlflow.log_metric("inference_memory_mb", memory["memory_usage_mb"]) - - # Log the inference file itself - mlflow.log_artifact(inference_file, "inference") - - # Log additional artifacts if provided - if artifacts_dir and os.path.exists(artifacts_dir): - for artifact_file in glob.glob(os.path.join(artifacts_dir, "*")): - if os.path.isfile(artifact_file): - mlflow.log_artifact(artifact_file, "artifacts") - - # Add tags if provided - if tags: - mlflow.set_tags(tags) - - # Add default tags - mlflow.set_tag("model_name", model_name) - mlflow.set_tag("stage", "inference") - else: - # Log to existing run - with mlflow.start_run(run_id=run_id): - # Log model size - if "model_size_mb" in inference_data: - mlflow.log_metric("model_size_mb", inference_data["model_size_mb"]) - - # Log batch size = 1, seq_length = 128 metrics as standard benchmarks - key = "batch_1_seq_128" - if "benchmarks" in inference_data and key in inference_data["benchmarks"]: - timing = inference_data["benchmarks"][key]["timing"] - mlflow.log_metric("inference_latency_ms", timing["avg_time_ms"]) - mlflow.log_metric("inference_throughput", timing["samples_per_second"]) - - memory = inference_data["benchmarks"][key]["memory"] - mlflow.log_metric("inference_memory_mb", memory["memory_usage_mb"]) - - # Log the inference file itself - mlflow.log_artifact(inference_file, "inference") - - # Log additional artifacts if provided - if artifacts_dir and os.path.exists(artifacts_dir): - for artifact_file in glob.glob(os.path.join(artifacts_dir, "*")): - if os.path.isfile(artifact_file): - mlflow.log_artifact(artifact_file, "artifacts") - - # Add tags if provided - if tags: - mlflow.set_tags(tags) - - # Update stage tag to indicate this run includes all stages - mlflow.set_tag("stage", "training+evaluation+inference") - - print(f"Inference metrics logged to MLflow run: {run_id}") - return run_id - - -def create_comparison_artifacts(experiment_id, output_dir, model_types=None): - """ - Create and log comparison artifacts for multiple models in an experiment. 
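# NOTE (editor): create_comparison_artifacts() below fetches every run in the
# experiment and filters by tag in Python. The tracking server can also filter
# server-side via a filter string; a sketch of that variant, assuming the
# model_type tag set elsewhere in this file.
from mlflow.tracking import MlflowClient

def runs_for_model_type_sketch(experiment_id, model_type):
    client = MlflowClient()
    return client.search_runs(
        experiment_ids=[experiment_id],
        filter_string=f"tags.model_type = '{model_type}'",
    )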
- - Args: - experiment_id (str): MLflow experiment ID - output_dir (str): Directory to save artifacts - model_types (list, optional): List of model types to compare - """ - os.makedirs(output_dir, exist_ok=True) - - # Get all runs for the experiment - client = MlflowClient() - runs = client.search_runs(experiment_ids=[experiment_id]) - - # Filter runs if model_types is provided - if model_types: - filtered_runs = [] - for run in runs: - if ( - "model_type" in run.data.tags - and run.data.tags["model_type"] in model_types - ): - filtered_runs.append(run) - runs = filtered_runs - - if not runs: - print("No runs found for comparison") - return - - # Extract metrics for comparison - comparison_data = [] - - for run in runs: - run_data = { - "run_id": run.info.run_id, - "model_type": run.data.tags.get("model_type", "unknown"), - "stage": run.data.tags.get("stage", "unknown"), - } - - # Add all metrics - for key, value in run.data.metrics.items(): - run_data[key] = value - - comparison_data.append(run_data) - - # Convert to DataFrame - comparison_df = pd.DataFrame(comparison_data) - - # Save comparison data - comparison_csv = os.path.join(output_dir, "model_comparison.csv") - comparison_df.to_csv(comparison_csv, index=False) - - # Create comparison plots - metrics_to_compare = [ - "test_accuracy", - "test_f1", - "test_precision", - "test_recall", - "avg_inference_time_seconds", - "inference_latency_ms", - "inference_throughput", - "model_size_mb", - "training_time_seconds", - ] - - for metric in metrics_to_compare: - if metric in comparison_df.columns: - plt.figure(figsize=(10, 6)) - - # Filter rows that have this metric - metric_df = comparison_df[comparison_df[metric].notna()] - - if len(metric_df) > 0: - # Create plot - ax = sns.barplot(x="model_type", y=metric, data=metric_df) - - # Add value labels on top of each bar - for i, v in enumerate(metric_df[metric]): - ax.text(i, v, f"{v:.4f}", ha="center", va="bottom") - - plt.title(f"Comparison of {metric}") - plt.tight_layout() - - # Save plot - plot_path = os.path.join(output_dir, f"compare_{metric}.png") - plt.savefig(plot_path) - plt.close() - - # Create radar chart for model comparison if we have at least 2 models - model_groups = comparison_df.groupby("model_type") - if len(model_groups) >= 2: - # Select metrics for radar chart - radar_metrics = [ - m - for m in ["test_accuracy", "test_f1", "test_precision", "test_recall"] - if m in comparison_df.columns - ] - - if radar_metrics: - # Get mean values for each model type and metric - radar_data = model_groups[radar_metrics].mean() - - # Create radar chart - create_radar_chart( - radar_data, os.path.join(output_dir, "model_radar_comparison.png") - ) - - # Create a summary markdown document - create_comparison_markdown(comparison_df, output_dir) - - # Log artifacts to MLflow - with mlflow.start_run(): - for file in glob.glob(os.path.join(output_dir, "*")): - mlflow.log_artifact(file) - - mlflow.set_tag("artifact_type", "model_comparison") - - -def create_radar_chart(data, output_path): - """ - Create a radar chart for model comparison. 
- - Args: - data (DataFrame): DataFrame with models as rows and metrics as columns - output_path (str): Path to save the radar chart - """ - # Number of metrics - metrics = data.columns.tolist() - num_metrics = len(metrics) - - # Number of models - models = data.index.tolist() - - # Create angles for each metric - angles = np.linspace(0, 2 * np.pi, num_metrics, endpoint=False).tolist() - angles += angles[:1] # Close the polygon - - # Create figure - fig, ax = plt.subplots(figsize=(10, 8), subplot_kw=dict(polar=True)) - - # Add each model to the chart - for i, model in enumerate(models): - values = data.loc[model].tolist() - values += values[:1] # Close the polygon - - # Plot values - ax.plot(angles, values, linewidth=2, label=model) - ax.fill(angles, values, alpha=0.1) - - # Add metric labels - ax.set_xticks(angles[:-1]) - ax.set_xticklabels(metrics) - - # Add legend and title - ax.legend(loc="upper right") - plt.title("Model Comparison Radar Chart") - - # Save the chart - plt.tight_layout() - plt.savefig(output_path) - plt.close() - - -def create_comparison_markdown(comparison_df, output_dir): - """ - Create a markdown document summarizing model comparisons. - - Args: - comparison_df (DataFrame): DataFrame with model comparison data - output_dir (str): Directory to save the markdown file - """ - # Filter to get the most complete runs for each model type - best_runs = [] - for model_type, group in comparison_df.groupby("model_type"): - # Find the run with the most metrics - metric_counts = group.notna().sum(axis=1) - best_run_idx = metric_counts.idxmax() - best_runs.append(group.loc[best_run_idx]) - - if not best_runs: - return - - best_runs_df = pd.DataFrame(best_runs) - - # Create markdown content - md_content = "# Model Comparison Summary\n\n" - md_content += "## Performance Metrics\n\n" - - # Create table header - md_content += "| Model |" - - accuracy_metrics = [ - col - for col in best_runs_df.columns - if col.startswith("test_") and col != "test_metrics" - ] - for metric in accuracy_metrics: - md_content += f" {metric} |" - - md_content += "\n|" + " --- |" * (len(accuracy_metrics) + 1) + "\n" - - # Add rows for each model - for _, row in best_runs_df.iterrows(): - md_content += f"| {row['model_type']} |" - - for metric in accuracy_metrics: - if pd.notna(row.get(metric)): - md_content += f" {row[metric]:.4f} |" - else: - md_content += " - |" - - md_content += "\n" - - # Add inference performance section - md_content += "\n## Inference Performance\n\n" - - # Create table header - md_content += "| Model |" - - inference_metrics = [ - "inference_latency_ms", - "inference_throughput", - "model_size_mb", - ] - for metric in inference_metrics: - if metric in best_runs_df.columns: - md_content += f" {metric} |" - - md_content += ( - "\n|" - + " --- |" - * (len([m for m in inference_metrics if m in best_runs_df.columns]) + 1) - + "\n" - ) - - # Add rows for each model - for _, row in best_runs_df.iterrows(): - md_content += f"| {row['model_type']} |" - - for metric in inference_metrics: - if metric in best_runs_df.columns and pd.notna(row.get(metric)): - if metric == "inference_latency_ms": - md_content += f" {row[metric]:.2f} ms |" - elif metric == "inference_throughput": - md_content += f" {row[metric]:.2f} samples/s |" - elif metric == "model_size_mb": - md_content += f" {row[metric]:.2f} MB |" - else: - md_content += f" {row[metric]:.4f} |" - else: - md_content += " - |" - - md_content += "\n" - - # Add training information section if available - if "training_time_seconds" in 
best_runs_df.columns: - md_content += "\n## Training Information\n\n" - - # Create table header - md_content += "| Model | Training Time |\n" - md_content += "| --- | --- |\n" - - # Add rows for each model - for _, row in best_runs_df.iterrows(): - md_content += f"| {row['model_type']} |" - - if pd.notna(row.get("training_time_seconds")): - # Convert seconds to a readable format - seconds = row["training_time_seconds"] - if seconds < 60: - time_str = f"{seconds:.2f} seconds" - elif seconds < 3600: - minutes = seconds / 60 - time_str = f"{minutes:.2f} minutes" - else: - hours = seconds / 3600 - time_str = f"{hours:.2f} hours" - - md_content += f" {time_str} |\n" - else: - md_content += " - |\n" - - # Write markdown file - md_path = os.path.join(output_dir, "model_comparison_summary.md") - with open(md_path, "w") as f: - f.write(md_content) - - -def main(): - parser = argparse.ArgumentParser(description="Log model experiments to MLflow") - - # MLflow setup - parser.add_argument( - "--tracking_uri", type=str, default="mlruns", help="MLflow tracking URI" - ) - parser.add_argument( - "--experiment_name", - type=str, - default="text_classification", - help="MLflow experiment name", - ) - - # Subparsers for different commands - subparsers = parser.add_subparsers(dest="command", help="Command to execute") - - # Training parser - train_parser = subparsers.add_parser("train", help="Log training metrics") - train_parser.add_argument( - "--model_dir", - type=str, - required=True, - help="Directory containing the model files", - ) - train_parser.add_argument( - "--metrics_file", type=str, required=True, help="Path to the metrics JSON file" - ) - train_parser.add_argument( - "--artifacts_dir", type=str, help="Directory containing additional artifacts" - ) - - # Evaluation parser - eval_parser = subparsers.add_parser("evaluate", help="Log evaluation metrics") - eval_parser.add_argument( - "--evaluation_file", - type=str, - required=True, - help="Path to the evaluation results JSON file", - ) - eval_parser.add_argument( - "--run_id", type=str, help="Existing MLflow run ID to log to" - ) - eval_parser.add_argument( - "--artifacts_dir", type=str, help="Directory containing additional artifacts" - ) - - # Inference parser - inference_parser = subparsers.add_parser("inference", help="Log inference metrics") - inference_parser.add_argument( - "--inference_file", - type=str, - required=True, - help="Path to the inference results JSON file", - ) - inference_parser.add_argument( - "--run_id", type=str, help="Existing MLflow run ID to log to" - ) - inference_parser.add_argument( - "--artifacts_dir", type=str, help="Directory containing additional artifacts" - ) - - # Compare parser - compare_parser = subparsers.add_parser( - "compare", help="Create model comparison artifacts" - ) - compare_parser.add_argument( - "--output_dir", - type=str, - required=True, - help="Directory to save comparison artifacts", - ) - compare_parser.add_argument( - "--model_types", type=str, nargs="+", help="List of model types to compare" - ) - - # Log complete run parser (training + evaluation + inference) - complete_parser = subparsers.add_parser( - "complete", help="Log a complete run with all metrics" - ) - complete_parser.add_argument( - "--model_dir", - type=str, - required=True, - help="Directory containing the model files", - ) - complete_parser.add_argument( - "--training_metrics", - type=str, - required=True, - help="Path to the training metrics JSON file", - ) - complete_parser.add_argument( - "--evaluation_file", - type=str, 
- required=True, - help="Path to the evaluation results JSON file", - ) - complete_parser.add_argument( - "--inference_file", - type=str, - required=True, - help="Path to the inference results JSON file", - ) - complete_parser.add_argument( - "--artifacts_dir", type=str, help="Directory containing additional artifacts" - ) - - args = parser.parse_args() - - # Setup MLflow - experiment_id = setup_mlflow(args.tracking_uri, args.experiment_name) - - if args.command == "train": - log_model_training(args.model_dir, args.metrics_file, args.artifacts_dir) - - elif args.command == "evaluate": - log_model_evaluation(args.evaluation_file, args.run_id, args.artifacts_dir) - - elif args.command == "inference": - log_inference_performance(args.inference_file, args.run_id, args.artifacts_dir) - - elif args.command == "compare": - create_comparison_artifacts(experiment_id, args.output_dir, args.model_types) - - elif args.command == "complete": - # Start with training - run_id = log_model_training(args.model_dir, args.training_metrics) - - # Add evaluation metrics to the same run - log_model_evaluation(args.evaluation_file, run_id) - - # Add inference metrics to the same run - log_inference_performance(args.inference_file, run_id, args.artifacts_dir) - - print(f"Complete run logged with ID: {run_id}") - - else: - parser.print_help() - - -if __name__ == "__main__": - main() diff --git a/src/training/scripts/s3_ferry_service.py b/src/training/scripts/s3_ferry_service.py new file mode 100644 index 00000000..193945cd --- /dev/null +++ b/src/training/scripts/s3_ferry_service.py @@ -0,0 +1,181 @@ +"""Service for S3Ferry file transfer operations.""" + +import requests +import traceback +from typing import Dict + +from loguru import logger +import os +import sys +from scripts.constants import ( + LOG_DIRECTORY, + LOG_FORMAT, + LOG_FILE_NAME, + ROTATION_SIZE, + RETENTION_PERIOD, + LOG_FILE_HANDLER_FORMAT, + S3_FERRY_BASE_URL, +) + +os.makedirs(LOG_DIRECTORY, exist_ok=True) + +# Remove default handler and add custom ones +logger.remove() + +# Add console handler for immediate feedback +logger.add( + sys.stderr, + level="DEBUG", + format=LOG_FORMAT, + colorize=True, +) + +# Add file handler +logger.add( + sink=os.path.join(LOG_DIRECTORY, LOG_FILE_NAME), + level="DEBUG", + rotation=ROTATION_SIZE, + retention=RETENTION_PERIOD, + backtrace=True, + diagnose=True, + format=LOG_FILE_HANDLER_FORMAT, +) + + +class S3Ferry: + """Service class for handling S3Ferry file transfer operations.""" + + def __init__(self, base_url: str = S3_FERRY_BASE_URL): + """ + Initialize the S3Ferry service. + + Args: + base_url: Base URL for the S3Ferry service + """ + self.base_url = base_url + self.url = f"{base_url}/v1/files/copy" + logger.info(f"S3Ferry service initialized with URL: {self.url}") + + def transfer_file( + self, + destination_file_path: str, + destination_storage_type: str, + source_file_path: str, + source_storage_type: str, + ) -> requests.Response: + """ + Transfer a file using S3Ferry service. 
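+
+        A minimal illustrative call (assumes the S3Ferry container is reachable
+        at S3_FERRY_BASE_URL and that "S3"/"FS" are the storage type
+        identifiers accepted by the deployed service; the paths are hypothetical):
+
+            ferry = S3Ferry()
+            response = ferry.transfer_file(
+                destination_file_path="bucket/models/model.zip",
+                destination_storage_type="S3",
+                source_file_path="models/model.zip",
+                source_storage_type="FS",
+            )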
+
+        Args:
+            destination_file_path: Path where the file should be stored in destination
+            destination_storage_type: Type of destination storage (e.g., 'S3', 'FS')
+            source_file_path: Path of the source file
+            source_storage_type: Type of source storage (e.g., 'FS', 'S3')
+
+        Returns:
+            Response object from the S3Ferry service
+        """
+        try:
+            payload = self.get_s3_ferry_payload(
+                destination_file_path,
+                destination_storage_type,
+                source_file_path,
+                source_storage_type,
+            )
+
+            logger.info(
+                f"[S3_FERRY] Transferring file: {source_file_path} -> {destination_file_path}"
+            )
+            logger.debug(f"[S3_FERRY] Payload: {payload}")
+
+            response = requests.post(
+                self.url,
+                json=payload,
+                headers={"Content-Type": "application/json"},
+                timeout=60,
+            )
+
+            logger.info(f"[S3_FERRY] Transfer response status: {response.status_code}")
+
+            # Accept both 200 (OK) and 201 (Created) as success
+            if response.status_code not in [200, 201]:
+                logger.error(f"[S3_FERRY] Transfer failed: {response.text}")
+            else:
+                logger.info(
+                    f"[S3_FERRY] ✅ Transfer successful (HTTP {response.status_code})"
+                )
+
+            return response
+
+        except Exception as e:
+            logger.error(f"[S3_FERRY] Error during file transfer: {str(e)}")
+            traceback.print_exc()
+            raise
+
+    def get_s3_ferry_payload(
+        self,
+        destination_file_path: str,
+        destination_storage_type: str,
+        source_file_path: str,
+        source_storage_type: str,
+    ) -> Dict[str, str]:
+        """
+        Generate S3Ferry payload for file transfer.
+
+        Args:
+            destination_file_path: Path where the file should be stored in destination
+            destination_storage_type: Type of destination storage
+            source_file_path: Path of the source file
+            source_storage_type: Type of source storage
+
+        Returns:
+            Dictionary containing the S3Ferry payload
+        """
+        payload = {
+            "destinationFilePath": destination_file_path,
+            "destinationStorageType": destination_storage_type,
+            "sourceFilePath": source_file_path,
+            "sourceStorageType": source_storage_type,
+        }
+
+        return payload
+
+    def upload_to_s3(
+        self, local_file_path: str, s3_destination_path: str
+    ) -> requests.Response:
+        """
+        Convenience method to upload a local file to S3.
+
+        Args:
+            local_file_path: Path to the local file
+            s3_destination_path: S3 destination path (e.g., 'bucket/folder/file.json')
+
+        Returns:
+            Response object from the S3Ferry service
+        """
+        return self.transfer_file(
+            destination_file_path=s3_destination_path,
+            destination_storage_type="S3",
+            source_file_path=local_file_path,
+            source_storage_type="FS",
+        )
+
+    def download_from_s3(
+        self, s3_source_path: str, local_destination_path: str
+    ) -> requests.Response:
+        """
+        Convenience method to download a file from S3 to local storage.
+
+        Args:
+            s3_source_path: S3 source path (e.g., 'bucket/folder/file.json')
+            local_destination_path: Local destination path
+
+        Returns:
+            Response object from the S3Ferry service
+        """
+        # Use the same storage type identifiers ("S3"/"FS") as upload_to_s3 and
+        # the dataset/model transfer calls elsewhere in this service.
+        return self.transfer_file(
+            destination_file_path=local_destination_path,
+            destination_storage_type="FS",
+            source_file_path=s3_source_path,
+            source_storage_type="S3",
+        )
diff --git a/src/training/scripts/s3_utility_handler.py b/src/training/scripts/s3_utility_handler.py
new file mode 100644
index 00000000..ff681460
--- /dev/null
+++ b/src/training/scripts/s3_utility_handler.py
@@ -0,0 +1,193 @@
+import os
+import sys
+import zipfile
+from pathlib import Path
+from datetime import datetime
+
+from scripts.s3_ferry_service import S3Ferry
+
+from loguru import logger
+from scripts.constants import (
+    LOG_DIRECTORY,
+    LOG_FORMAT,
+    LOG_FILE_NAME,
+    ROTATION_SIZE,
+    RETENTION_PERIOD,
+    LOG_FILE_HANDLER_FORMAT,
+    DATASETS_ARTIFACTS_DIR,
+    MODELS_DIR,
+    TRAINING_DATASET_FOLDER_NAME,
+    PROCESSED_DATASET_FOLDER_NAME,
+)
+
+os.makedirs(LOG_DIRECTORY, exist_ok=True)
+
+# Remove default handler and add custom ones
+logger.remove()
+
+# Add console handler for immediate feedback
+logger.add(
+    sys.stderr,
+    level="DEBUG",
+    format=LOG_FORMAT,
+    colorize=True,
+)
+
+# Add file handler
+logger.add(
+    sink=os.path.join(LOG_DIRECTORY, LOG_FILE_NAME),
+    level="DEBUG",
+    rotation=ROTATION_SIZE,
+    retention=RETENTION_PERIOD,
+    backtrace=True,
+    diagnose=True,
+    format=LOG_FILE_HANDLER_FORMAT,
+)
+
+
+class S3DatasetService:
+    """Service for downloading datasets from S3 and uploading trained models."""
+
+    def __init__(self):
+        self.s3_ferry = S3Ferry()
+        self.dataset_artifacts_dir = DATASETS_ARTIFACTS_DIR
+        self.models_dir = MODELS_DIR
+
+        # Create required directories
+        os.makedirs(
+            os.path.join(self.dataset_artifacts_dir, TRAINING_DATASET_FOLDER_NAME),
+            exist_ok=True,
+        )
+        os.makedirs(
+            os.path.join(self.dataset_artifacts_dir, PROCESSED_DATASET_FOLDER_NAME),
+            exist_ok=True,
+        )
+        os.makedirs(self.models_dir, exist_ok=True)
+
+    def download_aggregated_dataset(self, dataset_id: str) -> str:
+        """
+        Download aggregated dataset from S3.
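+
+        The S3 layout is assumed to be "{dataset_id}/aggregated_dataset.json",
+        so dataset_id="3" resolves to the S3 key "3/aggregated_dataset.json"
+        (see s3_source_path below).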
+
+        Args:
+            dataset_id: Dataset ID (e.g., "3")
+
+        Returns:
+            Path to downloaded dataset file
+        """
+        try:
+            logger.info(f"Downloading aggregated dataset for dataset ID: {dataset_id}")
+
+            # Define paths
+            s3_source_path = f"{dataset_id}/aggregated_dataset.json"
+            local_dest_path = (
+                f"src/training/dataset_artifacts/training_datasets/{dataset_id}.json"
+            )
+
+            logger.info(f"S3 source: {s3_source_path}")
+            logger.info(f"Local destination: {local_dest_path}")
+
+            # Download from S3 using S3Ferry
+            response = self.s3_ferry.transfer_file(
+                destination_file_path=local_dest_path,
+                destination_storage_type="FS",
+                source_file_path=s3_source_path,
+                source_storage_type="S3",
+            )
+
+            logger.info(f"S3Ferry response status: {response.status_code}")
+
+            if response.status_code in [200, 201]:
+                full_local_path = f"/app/{local_dest_path}"
+
+                if os.path.exists(full_local_path):
+                    logger.info(
+                        f"Successfully downloaded dataset to: {full_local_path}"
+                    )
+                    return full_local_path
+                else:
+                    raise FileNotFoundError(
+                        f"Downloaded file not found at: {full_local_path}"
+                    )
+            else:
+                raise RuntimeError(
+                    f"S3 download failed: HTTP {response.status_code} - {response.text}"
+                )

+        except Exception as e:
+            logger.error(f"Error downloading dataset {dataset_id}: {str(e)}")
+            raise
+
+    def upload_trained_model(
+        self, model_dir: str, model_id: int, model_type: str
+    ) -> str:
+        """
+        Zip and upload trained model to S3.
+
+        Args:
+            model_dir: Path to the trained model directory
+            model_id: Model ID used in the S3 destination path
+            model_type: Model type (e.g., "bert", "roberta")
+
+        Returns:
+            S3 path of uploaded model
+        """
+        try:
+            logger.info(f"Preparing to upload model from: {model_dir}")
+
+            # Create zip file name with timestamp
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            model_id = str(model_id)
+            zip_filename = f"{model_type}_dataset_{model_id}_model_{timestamp}.zip"
+            zip_path = os.path.join(self.models_dir, zip_filename)
+
+            # Zip the model directory
+            self._zip_directory(model_dir, zip_path)
+
+            # Upload to S3
+            s3_dest_path = f"trained_models/{model_id}/{zip_filename}"
+
+            logger.info(f"Uploading zipped model to S3: {s3_dest_path}")
+
+            response = self.s3_ferry.transfer_file(
+                destination_file_path=s3_dest_path,
+                destination_storage_type="S3",
+                source_file_path=zip_path.replace("/app/", ""),
+                source_storage_type="FS",
+            )
+
+            if response.status_code in [200, 201]:
+                logger.info(f"Successfully uploaded model to S3: {s3_dest_path}")
+
+                # Clean up local zip file
+                os.remove(zip_path)
+                logger.info(f"Cleaned up local zip file: {zip_path}")
+
+                return s3_dest_path
+            else:
+                raise RuntimeError(
+                    f"S3 upload failed: HTTP {response.status_code} - {response.text}"
+                )
+
+        except Exception as e:
+            logger.error(f"Error uploading model: {str(e)}")
+            raise
+
+    def _zip_directory(self, source_dir: str, zip_path: str):
+        """Zip a directory and all its contents."""
+        logger.info(f"Zipping directory {source_dir} to {zip_path}")
+
+        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
+            source_path = Path(source_dir)
+
+            for file_path in source_path.rglob("*"):
+                if file_path.is_file():
+                    # Create relative path for zip
+                    relative_path = file_path.relative_to(source_path)
+                    zipf.write(file_path, relative_path)
+                    logger.debug(f"Added to zip: {relative_path}")
+
+        logger.info(f"Successfully created zip file: {zip_path}")
+
+        # Log zip file size
+        zip_size = os.path.getsize(zip_path)
+        logger.info(f"Zip file size: {zip_size / (1024 * 1024):.2f} MB")
diff --git a/src/training/scripts/train.py
b/src/training/scripts/train.py index 2a072afc..27f5a077 100644 --- a/src/training/scripts/train.py +++ b/src/training/scripts/train.py @@ -2,41 +2,75 @@ import argparse import time import json -import random -import numpy as np -import pandas as pd from datetime import datetime +from typing import Dict, Any, List import torch -import torch.nn as nn from torch.utils.data import Dataset, DataLoader from torch.optim import AdamW from transformers import ( AutoTokenizer, AutoModelForSequenceClassification, - get_linear_schedule_with_warmup, - set_seed, + get_linear_schedule_with_warmup ) -from sklearn.metrics import ( - accuracy_score, - precision_recall_fscore_support, - roc_auc_score, - confusion_matrix, - roc_curve, +from scripts.utils import ( + log_confusion_matrix_with_names, + compute_metrics, + preprocess_training_summary_from_dict, + set_random_seeds, + measure_inference_time, + evaluate, + update_job_status ) import mlflow -import matplotlib.pyplot as plt -import seaborn as sns +from scripts.s3_utility_handler import S3DatasetService +from scripts.create_datasets import ScalableDatasetProcessor +import sys +from scripts.constants import ( + LOG_DIRECTORY, + LOG_FORMAT, + LOG_FILE_NAME, + ROTATION_SIZE, + RETENTION_PERIOD, + LOG_FILE_HANDLER_FORMAT, + MODEL_CONFIG, + REQUIRED_TRAINING_FILES, + PROCESSED_DATASET_DIR, + TEST_SIZE, + VALIDATION_SIZE, + RANDOM_STATE +) +from transformers.onnx import export +from transformers.onnx.features import FeaturesManager +from pathlib import Path +from loguru import logger + +os.makedirs(LOG_DIRECTORY, exist_ok=True) + +# Remove default handler and add custom ones +logger.remove() + +# Add console handler for immediate feedback +logger.add( + sys.stderr, + level="DEBUG", + format=LOG_FORMAT, + colorize=True, +) -# Define constants and configurations -AVAILABLE_MODELS = { - "bert": "bert-base-uncased", - "roberta": "roberta-base", - "xlm": "xlm-roberta-base", -} +# Add file handler +logger.add( + sink=os.path.join(LOG_DIRECTORY, LOG_FILE_NAME), + level="DEBUG", + rotation=ROTATION_SIZE, + retention=RETENTION_PERIOD, + backtrace=True, + diagnose=True, + format=LOG_FILE_HANDLER_FORMAT, +) class TextClassificationDataset(Dataset): @@ -70,851 +104,921 @@ def __getitem__(self, idx): } -def set_random_seeds(seed_val=42): - random.seed(seed_val) - np.random.seed(seed_val) - torch.manual_seed(seed_val) - torch.cuda.manual_seed_all(seed_val) - set_seed(seed_val) +def convert_model_to_onnx(model_dir: str): + """ + Convert a Hugging Face model to ONNX format. 
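+
+    Uses transformers.onnx.export with the "sequence-classification" feature.
+    A hedged smoke test for the exported graph (assumes onnxruntime is
+    installed; the exported inputs are typically "input_ids" and
+    "attention_mask", plus "token_type_ids" for BERT-style models):
+
+        import onnxruntime as ort
+        session = ort.InferenceSession(str(output_path))
+        enc = tokenizer("test", return_tensors="np", truncation=True,
+                        padding="max_length", max_length=128)
+        input_names = {i.name for i in session.get_inputs()}
+        logits = session.run(None, {k: v for k, v in enc.items() if k in input_names})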
+ """ + try: + model = AutoModelForSequenceClassification.from_pretrained(model_dir) + tokenizer = AutoTokenizer.from_pretrained(model_dir) + + _, model_onnx_config = FeaturesManager.check_supported_model_or_raise( + model, feature="sequence-classification" + ) + onnx_config = model_onnx_config(model.config) + + # # Create dummy input + # dummy_inputs= tokenizer( + # "This is a dummy input for ONNX export", + # return_tensors="pt", + # padding="max_length", + # truncation=True, + # max_length=128, + # ) + + output_path = Path(model_dir) / "model.onnx" + + logger.info(f"Exporting model to ONNX format at: {output_path}") + export(tokenizer, model, onnx_config, opset=14, output=output_path) + + logger.info(f"ONNX model exported to: {output_path}") + return str(output_path) + except Exception as e: + logger.error(f"ONNX export failed: {e}") + return None -def load_data_from_dataset_folder(dataset_id: str, data_dir: str = "data/processed", split: str = None): +def validate_processed_dataset(processed_dataset_dir: str) -> Dict[str, Any]: """ - Load data from the new dataset structure. - + Validate that the processed dataset has all required files. + Args: - dataset_id: Dataset ID (e.g., "3") - data_dir: Base directory containing processed datasets - split: Specific split to load (train/val/test) - + processed_dataset_dir: Path to processed dataset directory + Returns: - texts, labels, label_mappings or splits, label_mappings + Dictionary with validation results and dataset info """ - dataset_dir = os.path.join(data_dir, f"dataset_{dataset_id}") - - if not os.path.exists(dataset_dir): - raise FileNotFoundError(f"Dataset directory not found: {dataset_dir}") - - # Load label mappings - mappings_file = os.path.join(dataset_dir, "label_mappings.json") - with open(mappings_file, 'r', encoding='utf-8') as f: - label_mappings = json.load(f) - - if split: - # Load specific split - split_file = os.path.join(dataset_dir, f"{split}.json") - with open(split_file, 'r', encoding='utf-8') as f: - data = json.load(f) - - return data['texts'], data['label_ids'], label_mappings - else: - # Load all splits - splits = {} - for split_name in ['train', 'val', 'test']: - split_file = os.path.join(dataset_dir, f"{split_name}.json") - if os.path.exists(split_file): - with open(split_file, 'r', encoding='utf-8') as f: - data = json.load(f) - splits[split_name] = { - 'texts': data['texts'], - 'labels': data['label_ids'] # Use numeric labels - } - - return splits, label_mappings + try: + required_files = REQUIRED_TRAINING_FILES + validation_result = {"valid": True, "missing_files": [], "dataset_info": {}} + + # Check for required files + for file_name in required_files: + file_path = os.path.join(processed_dataset_dir, file_name) + if not os.path.exists(file_path): + validation_result["valid"] = False + validation_result["missing_files"].append(file_name) + + if validation_result["valid"]: + # Load dataset info + label_mappings_path = os.path.join( + processed_dataset_dir, "label_mappings.json" + ) + with open(label_mappings_path, "r", encoding="utf-8") as f: + label_mappings = json.load(f) + + # Count samples in each split + for split in ["train", "val", "test"]: + split_file = os.path.join(processed_dataset_dir, f"{split}.json") + with open(split_file, "r", encoding="utf-8") as f: + split_data = json.load(f) + validation_result["dataset_info"][f"{split}_samples"] = len( + split_data["texts"] + ) + + validation_result["dataset_info"]["num_classes"] = label_mappings[ + "num_classes" + ] + 
validation_result["dataset_info"]["class_names"] = list(
+                label_mappings["label_to_id"].keys()
+            )
+
+        logger.info(f"Dataset validation result: {validation_result}")
+        return validation_result
+
+    except OSError as e:
+        logger.error(f"Error validating processed dataset: {str(e)}")
+        raise


-def load_data(data_path, split=None):
+def load_data_from_dataset_folder(
+    dataset_id: str, data_dir: str, split: str = None, download_from_s3: bool = False
+):
     """
-    Load the data from the given path (legacy format).
+    Load data from the dataset structure, with option to download from S3.
 
     Args:
-        data_path: Path to the data file (CSV or JSON)
-        split: Train/val/test split to load
+        dataset_id: Dataset ID (e.g., "3")
+        data_dir: Base directory containing the processed datasets used in training
+        split: Specific split to load (train/val/test)
+        download_from_s3: Whether to download from S3 first
 
     Returns:
-        texts, labels
+        texts, labels, label_mappings or splits, label_mappings
     """
-    if split:
-        data_path = os.path.join(data_path, f"{split}.csv")
+    if download_from_s3:
+        logger.info(f"Downloading and preprocessing dataset {dataset_id} from S3...")
 
-    if data_path.endswith(".csv"):
-        df = pd.read_csv(data_path)
-    elif data_path.endswith(".json"):
-        df = pd.read_json(data_path, lines=True)
-    else:
-        raise ValueError(f"Unsupported file format: {data_path}")
+        try:
+            s3_service = S3DatasetService()
 
-    text_col = "text" if "text" in df.columns else "content"
+            # Step 1: Download aggregated dataset from S3
+            logger.info("Step 1: Downloading aggregated dataset from S3...")
+            aggregated_dataset_path = s3_service.download_aggregated_dataset(dataset_id)
 
-    # Check for 'agency' column (our case) or fall back to 'label' or 'class'
-    if "agency" in df.columns:
-        label_col = "agency"
-    elif "label" in df.columns:
-        label_col = "label"
-    elif "class" in df.columns:
-        label_col = "class"
-    else:
-        raise ValueError(
-            "No suitable label column found in the data. Need 'agency', 'label', or 'class'."
-        )
+            # Step 2: Use ScalableDatasetProcessor directly for preprocessing
+            logger.info(
+                "Step 2: Preprocessing dataset using ScalableDatasetProcessor..."
+            )
 
-    # If labels are strings (e.g., agency names), convert to integers
-    if df[label_col].dtype == "object":
-        # Create a mapping of agency names to integers
-        unique_labels = df[label_col].unique()
-        label_map = {label: i for i, label in enumerate(unique_labels)}
+            # Set up output directory for processed datasets
+            processed_output_dir = PROCESSED_DATASET_DIR
 
-        # Save the mapping for later reference
-        mapping_file = os.path.join(os.path.dirname(data_path), "label_mapping.json")
-        os.makedirs(os.path.dirname(mapping_file), exist_ok=True)
-        with open(mapping_file, "w") as f:
-            json.dump(label_map, f, indent=2)
+            # Initialize ScalableDatasetProcessor
+            processor = ScalableDatasetProcessor(
+                dataset_path=aggregated_dataset_path, output_dir=processed_output_dir
+            )
 
-        # Convert string labels to integers
-        labels = df[label_col].map(label_map).values
-    else:
-        labels = df[label_col].values
+            # Process the dataset
+            result = processor.process_dataset(
+                test_size=TEST_SIZE, val_size=VALIDATION_SIZE, random_state=RANDOM_STATE
+            )
 
-    return df[text_col].values, labels
+            logger.info(f"Dataset processing result: {result}")
+            # Step 3: Validate processed dataset
+            processed_dataset_dir = result["output_dir"]
+            validation_result = validate_processed_dataset(processed_dataset_dir)
-# ...existing code...
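+            # validate_processed_dataset (above) is assumed to check for the
+            # files listed in REQUIRED_TRAINING_FILES (the train/val/test
+            # splits plus label_mappings.json) before training proceeds.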
+ if not validation_result["valid"]: + raise FileNotFoundError( + f"Dataset validation failed. Missing files: {validation_result['missing_files']}" + ) -def compute_metrics(preds, labels, probs=None): - """ - Compute various classification metrics. + logger.info(f"Dataset info: {validation_result['dataset_info']}") - Args: - preds: Predicted labels - labels: True labels - probs: Prediction probabilities for ROC/PR curves + # Use the processed dataset directory + dataset_dir = processed_dataset_dir - Returns: - Dictionary of metrics - """ - precision, recall, f1, _ = precision_recall_fscore_support( - labels, preds, average="weighted" - ) + except Exception as e: + logger.error(f"Failed to download and preprocess from S3: {str(e)}") + raise + else: + # This is the legacy format where datasets are stored locally and it should be in processed format + dataset_dir = os.path.join(data_dir, f"dataset_{dataset_id}") - acc = accuracy_score(labels, preds) + if not os.path.exists(dataset_dir): + raise FileNotFoundError(f"Dataset directory not found: {dataset_dir}") - metrics = { - "accuracy": acc, - "precision": precision, - "recall": recall, - "f1": f1, - } + # Load label mappings + mappings_file = os.path.join(dataset_dir, "label_mappings.json") + with open(mappings_file, "r", encoding="utf-8") as f: + label_mappings = json.load(f) - # Compute confusion matrix - cm = confusion_matrix(labels, preds) - - # Calculate class-wise metrics - class_precision, class_recall, class_f1, _ = precision_recall_fscore_support( - labels, preds, average=None - ) - - # Add class metrics to the return dict - for i, (p, r, f) in enumerate(zip(class_precision, class_recall, class_f1)): - metrics[f"class_{i}_precision"] = p - metrics[f"class_{i}_recall"] = r - metrics[f"class_{i}_f1"] = f - - # Compute ROC AUC if probabilities are provided - if probs is not None: - if probs.shape[1] == 2: # Binary classification - metrics["roc_auc"] = roc_auc_score(labels, probs[:, 1]) - - # Compute FPR at 95% TPR (using MLflow-compatible naming) - fpr, tpr, thresholds = roc_curve(labels, probs[:, 1]) - if any(tpr >= 0.95): - idx = np.argmin(np.abs(tpr - 0.95)) - metrics["fpr_at_95_tpr"] = fpr[idx] - - # Additional useful metrics with proper naming - if any(tpr >= 0.90): - idx_90 = np.argmin(np.abs(tpr - 0.90)) - metrics["fpr_at_90_tpr"] = fpr[idx_90] - - else: # Multi-class - try: - # One-hot encode the labels for multi-class ROC AUC - labels_one_hot = np.zeros((len(labels), probs.shape[1])) - for i, label in enumerate(labels): - labels_one_hot[i, label] = 1 + if split: + # Load specific split + split_file = os.path.join(dataset_dir, f"{split}.json") + with open(split_file, "r", encoding="utf-8") as f: + data = json.load(f) - metrics["roc_auc"] = roc_auc_score( - labels_one_hot, probs, average="weighted", multi_class="ovr" - ) - except Exception as e: - print(f"Warning: Could not compute ROC AUC for multi-class: {str(e)}") + return data["texts"], data["label_ids"], label_mappings + else: + # Load all splits + splits = {} + for split_name in ["train", "val", "test"]: + split_file = os.path.join(dataset_dir, f"{split_name}.json") + if os.path.exists(split_file): + with open(split_file, "r", encoding="utf-8") as f: + data = json.load(f) + splits[split_name] = { + "texts": data["texts"], + "labels": data["label_ids"], + } - return metrics, cm + return splits, label_mappings -def log_confusion_matrix(cm, class_names=None): +def train_epoch(model, dataloader, optimizer, scheduler, device): """ - Create and log a confusion matrix figure to 
MLflow. - + Execute one training epoch with forward pass, backpropagation, and optimization. + Args: - cm: Confusion matrix - class_names: Names of the classes - """ - if class_names is None: - class_names = [f"Class {i}" for i in range(cm.shape[0])] - - plt.figure(figsize=(10, 8)) - sns.heatmap( - cm, - annot=True, - fmt="d", - cmap="Blues", - xticklabels=class_names, - yticklabels=class_names, - ) - plt.xlabel("Predicted") - plt.ylabel("True") - plt.title("Confusion Matrix") - - # Save the figure - confusion_matrix_path = "confusion_matrix.png" - plt.tight_layout() - plt.savefig(confusion_matrix_path) - plt.close() - - # Log the figure to MLflow - mlflow.log_artifact(confusion_matrix_path) - - # Clean up the file - os.remove(confusion_matrix_path) - - -def log_confusion_matrix_with_names(cm, class_names): - """ - Create and log a confusion matrix figure with class names to MLflow. + model (torch.nn.Module): The model to train. + dataloader (DataLoader): Training data batches. + optimizer (torch.optim.Optimizer): Optimizer for parameter updates. + scheduler: Learning rate scheduler. + device (torch.device): Device for computation (CPU/CUDA). + + Returns: + float: Average training loss for the epoch. + + Note: + Includes gradient clipping (max_norm=1.0) for training stability. """ - plt.figure(figsize=(max(10, len(class_names) * 1.2), max(8, len(class_names) * 1.0))) - sns.heatmap( - cm, - annot=True, - fmt="d", - cmap="Blues", - xticklabels=class_names, - yticklabels=class_names, - ) - plt.xlabel("Predicted") - plt.ylabel("True") - plt.title("Confusion Matrix") - plt.xticks(rotation=45, ha='right') - plt.yticks(rotation=0) - - # Save the figure - confusion_matrix_path = "confusion_matrix.png" - plt.tight_layout() - plt.savefig(confusion_matrix_path, dpi=300, bbox_inches='tight') - plt.close() - - # Log the figure to MLflow - mlflow.log_artifact(confusion_matrix_path) - - # Clean up the file - os.remove(confusion_matrix_path) - - -def train_epoch(model, dataloader, optimizer, scheduler, device): - """Run a single training epoch.""" model.train() total_loss = 0 for batch in dataloader: - optimizer.zero_grad() - input_ids = batch["input_ids"].to(device) attention_mask = batch["attention_mask"].to(device) labels = batch["label"].to(device) + optimizer.zero_grad() + outputs = model( input_ids=input_ids, attention_mask=attention_mask, labels=labels ) loss = outputs.loss - total_loss += loss.item() - loss.backward() + + # Gradient clipping torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) + optimizer.step() scheduler.step() - return total_loss / len(dataloader) + total_loss += loss.item() + return total_loss / len(dataloader) -def evaluate(model, dataloader, device, num_labels): - """Evaluate the model on the given dataloader.""" - model.eval() - all_preds = [] - all_labels = [] - all_probs = [] +def train_single_model( + model_type: str, + train_texts: List[str], + train_labels: List[int], + val_texts: List[str], + val_labels: List[int], + test_texts: List[str], + test_labels: List[int], + label_mappings: Dict, + num_labels: int, + class_names: List[str], + output_dir: str, + args, +) -> Dict[str, Any]: + """ + Train a single transformer model for text classification with MLflow tracking. + + Args: + model_type (str): Model architecture (e.g., "bert", "roberta"). + train_texts, train_labels: Training data and labels. + val_texts, val_labels: Validation data and labels. + test_texts, test_labels: Test data and labels. + label_mappings (Dict): Label mappings and metadata. 
+ num_labels (int): Number of classification classes. + class_names (List[str]): Class names for logging. + output_dir (str): Directory to save trained model. + args: Training hyperparameters and configuration. + + Returns: + Dict[str, Any]: Training results with status, metrics, and model paths. + + Note: + Uses early stopping, MLflow tracking, and saves best model with ONNX export. + """ - with torch.no_grad(): - for batch in dataloader: - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) - labels = batch["label"].to(device) + try: + # Set up device + device = torch.device( + "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" + ) - outputs = model(input_ids=input_ids, attention_mask=attention_mask) + # Get model configuration + model_name = MODEL_CONFIG.get(model_type, {}).get("name", model_type) + config_max_length = MODEL_CONFIG.get(model_type, {}).get( + "max_length", args.max_seq_length + ) + max_length = config_max_length if config_max_length else args.max_seq_length + logger.info(f"Device: {device}") + logger.info(f"Using model: {model_name}") + logger.info(f"Max sequence length: {max_length}") + + # Set up tokenizer and model + tokenizer = AutoTokenizer.from_pretrained(model_name) + model = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + model.to(device) - logits = outputs.logits + # Create datasets + train_dataset = TextClassificationDataset( + train_texts, train_labels, tokenizer, max_length=max_length + ) + val_dataset = TextClassificationDataset( + val_texts, val_labels, tokenizer, max_length=max_length + ) + test_dataset = TextClassificationDataset( + test_texts, test_labels, tokenizer, max_length=max_length + ) - # Convert logits to probabilities - probs = torch.nn.functional.softmax(logits, dim=1) + # Create dataloaders + train_dataloader = DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=True + ) + val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size) + test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size) - # Get predicted class (argmax) - preds = torch.argmax(logits, dim=1) + # Set up optimizer and scheduler + optimizer = AdamW( + model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay + ) + total_steps = len(train_dataloader) * args.num_epochs + warmup_steps = int(total_steps * args.warmup_ratio) + scheduler = get_linear_schedule_with_warmup( + optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps + ) - all_preds.extend(preds.cpu().numpy()) - all_labels.extend(labels.cpu().numpy()) - all_probs.extend(probs.cpu().numpy()) + # Set up MLflow experiment for this model + experiment_name = f"text_classification_{model_type}_dataset_{args.dataset_id}" + mlflow.set_experiment(experiment_name) + + with mlflow.start_run( + run_name=f"{model_type}_dataset_{args.dataset_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ): + # Log parameters + mlflow.log_params( + { + "dataset_id": args.dataset_id, + "model_type": model_type, + "model_name": model_name, + "num_epochs": args.num_epochs, + "batch_size": args.batch_size, + "learning_rate": args.learning_rate, + "max_seq_length": max_length, + "num_labels": num_labels, + "train_samples": len(train_dataset), + "val_samples": len(val_dataset), + "test_samples": len(test_dataset), + } + ) - return np.array(all_preds), np.array(all_labels), np.array(all_probs) + # Training loop + start_time = time.time() + best_val_f1 = 0.0 + best_epoch = 0 + 
best_model_dir = None + for epoch in range(args.num_epochs): + logger.info(f"Epoch {epoch + 1}/{args.num_epochs}") -def measure_inference_time(model, dataloader, device, num_runs=100): - """Measure the average inference time.""" - model.eval() - batch = next(iter(dataloader)) + # Train + train_loss = train_epoch( + model, train_dataloader, optimizer, scheduler, device + ) + logger.info(f"Train Loss: {train_loss:.4f}") + mlflow.log_metric("train_loss", train_loss, step=epoch) - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) + # Validate + val_preds, val_labels_eval, val_probs = evaluate( + model, val_dataloader, device, num_labels + ) + val_metrics, _ = compute_metrics( + val_preds, val_labels_eval, val_probs, class_names + ) - # Warm-up - for _ in range(10): - with torch.no_grad(): - _ = model(input_ids=input_ids, attention_mask=attention_mask) + logger.info( + f"Val F1: {val_metrics['f1']:.4f}, Val Acc: {val_metrics['accuracy']:.4f}" + ) - # Measure inference time - start_time = time.time() - for _ in range(num_runs): - with torch.no_grad(): - _ = model(input_ids=input_ids, attention_mask=attention_mask) - end_time = time.time() + for metric_name, metric_value in val_metrics.items(): + mlflow.log_metric(f"val_{metric_name}", metric_value, step=epoch) - avg_time = (end_time - start_time) / num_runs - return avg_time + # Save best model + if val_metrics["f1"] > best_val_f1: + best_val_f1 = val_metrics["f1"] + best_epoch = epoch + # Save model + model_dir = os.path.join(output_dir, f"{model_type}_epoch_{epoch}") + os.makedirs(model_dir, exist_ok=True) + model.save_pretrained(model_dir) + tokenizer.save_pretrained(model_dir) -def train_and_evaluate_new_format(args): - """Main training and evaluation function for new dataset format.""" - # Set up random seeds for reproducibility - set_random_seeds(args.seed) + # Save label mappings + mappings_file = os.path.join(model_dir, "label_mappings.json") + with open(mappings_file, "w", encoding="utf-8") as f: + json.dump(label_mappings, f, ensure_ascii=False, indent=2) - # Set up device - device = torch.device( - "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" - ) + best_model_dir = model_dir + logger.info(f"New best model saved at epoch {epoch + 1}") - # Load data from new dataset structure - print(f"Loading dataset ID: {args.dataset_id}") - splits, label_mappings = load_data_from_dataset_folder(args.dataset_id, args.data_dir) - - # Extract data for each split - train_texts = splits['train']['texts'] - train_labels = splits['train']['labels'] - val_texts = splits['val']['texts'] - val_labels = splits['val']['labels'] - test_texts = splits['test']['texts'] - test_labels = splits['test']['labels'] - - # Get number of classes and class names - num_labels = label_mappings['num_classes'] - class_names = [label_mappings['id_to_label'][str(i)] for i in range(num_labels)] - - print(f"Dataset Info:") - print(f" Classes: {num_labels}") - print(f" Class names: {class_names}") - print(f" Train samples: {len(train_texts)}") - print(f" Val samples: {len(val_texts)}") - print(f" Test samples: {len(test_texts)}") - - # Set up model name - model_name = AVAILABLE_MODELS.get(args.model_type, args.model_type) - - # Set up tokenizer and model - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = AutoModelForSequenceClassification.from_pretrained( - model_name, num_labels=num_labels - ) - model.to(device) - - # Create datasets - train_dataset = TextClassificationDataset( - train_texts, 
train_labels, tokenizer, max_length=args.max_seq_length - ) - val_dataset = TextClassificationDataset( - val_texts, val_labels, tokenizer, max_length=args.max_seq_length - ) - test_dataset = TextClassificationDataset( - test_texts, test_labels, tokenizer, max_length=args.max_seq_length - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=args.batch_size, shuffle=True - ) - val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size) - test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size) - - # Set up optimizer and scheduler - optimizer = AdamW( - model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay - ) - - total_steps = len(train_dataloader) * args.num_epochs - warmup_steps = int(total_steps * args.warmup_ratio) - - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps - ) - - # Create output directory with dataset ID - output_dir = os.path.join(args.output_dir, f"dataset_{args.dataset_id}") - os.makedirs(output_dir, exist_ok=True) - - # Set up MLflow - mlflow.set_tracking_uri(args.mlflow_tracking_uri) - experiment_name = f"text_classification_{args.model_type}_dataset_{args.dataset_id}" - mlflow.set_experiment(experiment_name) - - # Start MLflow run - with mlflow.start_run( - run_name=f"{args.model_type}_dataset_{args.dataset_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}" - ): - # Log parameters - mlflow.log_params( - { - "dataset_id": args.dataset_id, - "model_type": args.model_type, - "model_name": model_name, - "num_epochs": args.num_epochs, - "batch_size": args.batch_size, - "learning_rate": args.learning_rate, - "weight_decay": args.weight_decay, - "warmup_ratio": args.warmup_ratio, - "max_seq_length": args.max_seq_length, - "seed": args.seed, - "device": str(device), - "num_labels": num_labels, - "class_names": class_names, - "train_samples": len(train_dataset), - "val_samples": len(val_dataset), - "test_samples": len(test_dataset), - } - ) + training_time = time.time() - start_time + mlflow.log_metric("training_time_seconds", training_time) - # Training loop - start_time = time.time() - best_val_f1 = 0.0 - best_epoch = 0 + # Load best model for final evaluation + if best_model_dir: + model = AutoModelForSequenceClassification.from_pretrained( + best_model_dir + ) + model.to(device) - for epoch in range(args.num_epochs): - print(f"\nEpoch {epoch + 1}/{args.num_epochs}") - - train_loss = train_epoch( - model, train_dataloader, optimizer, scheduler, device + # Final evaluation + test_preds, test_labels_eval, test_probs = evaluate( + model, test_dataloader, device, num_labels ) - - print(f"Train Loss: {train_loss:.4f}") - mlflow.log_metric("train_loss", train_loss, step=epoch) - - # Evaluate on validation set - val_preds, val_labels, val_probs = evaluate( - model, val_dataloader, device, num_labels + test_metrics, test_cm = compute_metrics( + test_preds, test_labels_eval, test_probs, class_names ) - val_metrics, _ = compute_metrics(val_preds, val_labels, val_probs) - - print(f"Val F1: {val_metrics['f1']:.4f}, Val Acc: {val_metrics['accuracy']:.4f}") - - for metric_name, metric_value in val_metrics.items(): - mlflow.log_metric(f"val_{metric_name}", metric_value, step=epoch) - - # Save best model - if val_metrics["f1"] > best_val_f1: - best_val_f1 = val_metrics["f1"] - best_epoch = epoch - - # Save the model - model_dir = os.path.join(output_dir, f"{args.model_type}_epoch_{epoch}") - os.makedirs(model_dir, exist_ok=True) - 
model.save_pretrained(model_dir) - tokenizer.save_pretrained(model_dir) - - # Save label mappings with the model - mappings_file = os.path.join(model_dir, "label_mappings.json") - with open(mappings_file, 'w', encoding='utf-8') as f: - json.dump(label_mappings, f, ensure_ascii=False, indent=2) - - print(f"New best model saved at epoch {epoch + 1}") - - # Log the model - mlflow.pytorch.log_model( - model, - f"{args.model_type}_best_model", - registered_model_name=f"{args.model_type}_classifier_dataset_{args.dataset_id}", - ) + logger.info(f"\n{model_type.upper()} Test Results:") + for metric_name, metric_value in test_metrics.items(): + if not metric_name.startswith("class_"): + logger.info(f" {metric_name}: {metric_value:.4f}") + mlflow.log_metric(f"test_{metric_name}", metric_value) + + # Log confusion matrix + log_confusion_matrix_with_names(test_cm, class_names) + + # Measure inference time + inference_time = measure_inference_time(model, test_dataloader, device) + mlflow.log_metric("avg_inference_time_seconds", inference_time) + + return { + "status": "success", + "model_type": model_type, + "model_name": model_name, + "best_epoch": best_epoch + 1, + "best_val_f1": best_val_f1, + "training_time_seconds": training_time, + "test_metrics": test_metrics, + "best_model_path": best_model_dir, + "inference_time_seconds": inference_time, + "num_parameters": sum(p.numel() for p in model.parameters()), + } - training_time = time.time() - start_time - mlflow.log_metric("training_time_seconds", training_time) - print(f"\nTraining completed in {training_time:.2f} seconds") + except Exception as e: + logger.error(f"Error training {model_type}: {str(e)}") + return { + "status": "failed", + "model_type": model_type, + "error": str(e), + "test_metrics": {"f1": 0.0, "accuracy": 0.0}, + } - # Load the best model for final evaluation - best_model_path = os.path.join(output_dir, f"{args.model_type}_epoch_{best_epoch}") - if os.path.exists(best_model_path): - model = AutoModelForSequenceClassification.from_pretrained(best_model_path) - model.to(device) - # Evaluate on test set - print("\nEvaluating on test set...") - test_preds, test_labels, test_probs = evaluate( - model, test_dataloader, device, num_labels +def train_multiple_models(args): + """ + Train multiple transformer models and select the best performing one. + + Orchestrates multi-model training pipeline with data loading, model training, + evaluation, and best model processing including ONNX export and S3 upload. + + Args: + args (argparse.Namespace): Command line arguments containing: + - model_types (str): JSON string of model types (e.g., '["bert", "roberta"]') + - dataset_id (str): Dataset identifier + - data_dir (str): Base directory for datasets + - output_dir (str): Directory to save outputs + - model_id, job_id (int): Unique identifiers + - Training hyperparameters (num_epochs, batch_size, learning_rate, etc.) + + Returns: + Dict[str, Any]: Preprocessed training results containing: + - training_summary: Overall statistics and best model info + - models_performance: Individual model results + - model_comparison: Performance comparison across models + - best_model_s3_path: S3 path of uploaded best model (if successful) + - best_model_onnx_path: ONNX model path (if successful) + + Example: + >>> args = argparse.Namespace(model_types='["bert", "roberta"]', ...) 
+ >>> results = train_multiple_models(args) + >>> print(f"Best model: {results['training_summary']['best_overall_model']}") + + Note: + - Downloads data from S3, trains models sequentially with MLflow tracking + - Uses early stopping, exports best model to ONNX, uploads to S3 + - Failed models don't stop the pipeline + """ + + try: + logger.info("🔍 Starting train_multiple_models function") + logger.info(f"📝 Received args: {vars(args)}") + # Parse model types from JSON string + try: + model_types = json.loads(args.model_types) + if not isinstance(model_types, list): + model_types = [model_types] + except (json.JSONDecodeError, AttributeError): + # Fallback to single model if parsing fails + model_types = [args.model_type] if hasattr(args, "model_type") else ["bert"] + + logger.info(f"Training models: {model_types}") + + # Results storage + all_model_results = {} + best_overall_model = None + best_overall_score = 0.0 + + # Create main output directory + main_output_dir = args.output_dir + os.makedirs(main_output_dir, exist_ok=True) + + # Load data once for all models + logger.info(f"Loading dataset ID: {args.dataset_id}") + splits, label_mappings = load_data_from_dataset_folder( + args.dataset_id, args.data_dir, download_from_s3=True ) - test_metrics, test_cm = compute_metrics(test_preds, test_labels, test_probs) + # Extract data for each split + train_texts = splits["train"]["texts"] + train_labels = splits["train"]["labels"] + val_texts = splits["val"]["texts"] + val_labels = splits["val"]["labels"] + test_texts = splits["test"]["texts"] + test_labels = splits["test"]["labels"] + + num_labels = label_mappings["num_classes"] + class_names = [label_mappings["id_to_label"][str(i)] for i in range(num_labels)] + + logger.info("Dataset Info:") + logger.info(f" Classes: {num_labels}") + logger.info(f" Train samples: {len(train_texts)}") + logger.info(f" Val samples: {len(val_texts)}") + logger.info(f" Test samples: {len(test_texts)}") + + # Train each model type + for model_type in model_types: + try: + logger.info(f"\n{'=' * 60}") + logger.info(f"🚀 TRAINING MODEL: {model_type.upper()}") + logger.info(f"{'=' * 60}") + + # Create model-specific output directory + model_output_dir = os.path.join(main_output_dir, f"model_{model_type}") + os.makedirs(model_output_dir, exist_ok=True) + + # Train single model + model_result = train_single_model( + model_type=model_type, + train_texts=train_texts, + train_labels=train_labels, + val_texts=val_texts, + val_labels=val_labels, + test_texts=test_texts, + test_labels=test_labels, + label_mappings=label_mappings, + num_labels=num_labels, + class_names=class_names, + output_dir=model_output_dir, + args=args, + ) - print("\nTest Results:") - for metric_name, metric_value in test_metrics.items(): - if not metric_name.startswith('class_'): # Only show overall metrics - print(f" {metric_name}: {metric_value:.4f}") - mlflow.log_metric(f"test_{metric_name}", metric_value) + # Store results + all_model_results[model_type] = model_result - # Log confusion matrix with class names - log_confusion_matrix_with_names(test_cm, class_names) + # Check if this is the best model overall + current_score = model_result["test_metrics"]["f1"] + if current_score > best_overall_score: + best_overall_score = current_score + best_overall_model = model_type - # Measure inference time - inference_time = measure_inference_time(model, test_dataloader, device) - mlflow.log_metric("avg_inference_time_seconds", inference_time) - print(f"Average inference time: {inference_time:.6f} seconds") + 
logger.info( + f"✅ {model_type.upper()} completed - F1: {current_score:.4f}" + ) - # Save comprehensive metrics to a JSON file - metrics_path = os.path.join(output_dir, f"{args.model_type}_metrics.json") - output_metrics = { - "dataset_id": args.dataset_id, - "model_type": args.model_type, - "model_name": model_name, - "num_classes": num_labels, - "class_names": class_names, - "training_time_seconds": training_time, - "avg_inference_time_seconds": inference_time, - "test_metrics": test_metrics, - "best_epoch": best_epoch + 1, - "best_val_f1": best_val_f1, - "label_mappings": label_mappings, - "training_params": { - "num_epochs": args.num_epochs, - "batch_size": args.batch_size, - "learning_rate": args.learning_rate, - "max_seq_length": args.max_seq_length, - "seed": args.seed - } - } + except Exception as e: + logger.error(f"❌ Failed to train {model_type}: {str(e)}") + all_model_results[model_type] = { + "status": "failed", + "error": str(e), + "test_metrics": {"f1": 0.0, "accuracy": 0.0}, + } - with open(metrics_path, "w", encoding='utf-8') as f: - json.dump(output_metrics, f, indent=2, ensure_ascii=False) - - # Log the metrics file - mlflow.log_artifact(metrics_path) - - print(f"\nMetrics saved to: {metrics_path}") - print(f"Best model saved to: {best_model_path}") - - return output_metrics - - -def train_and_evaluate(args): - """Main training and evaluation function for legacy format.""" - # Set up random seeds for reproducibility - set_random_seeds(args.seed) - - # Set up device - device = torch.device( - "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" - ) - - # Load data - train_texts, train_labels = load_data(os.path.join(args.data_dir, "train.csv")) - val_texts, val_labels = load_data(os.path.join(args.data_dir, "val.csv")) - test_texts, test_labels = load_data(os.path.join(args.data_dir, "test.csv")) - - # Determine number of classes - num_labels = len(np.unique(train_labels)) - - # Set up model name - model_name = AVAILABLE_MODELS.get(args.model_type, args.model_type) - - # Set up tokenizer and model - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = AutoModelForSequenceClassification.from_pretrained( - model_name, num_labels=num_labels - ) - model.to(device) - - # Create datasets - train_dataset = TextClassificationDataset( - train_texts, train_labels, tokenizer, max_length=args.max_seq_length - ) - val_dataset = TextClassificationDataset( - val_texts, val_labels, tokenizer, max_length=args.max_seq_length - ) - test_dataset = TextClassificationDataset( - test_texts, test_labels, tokenizer, max_length=args.max_seq_length - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=args.batch_size, shuffle=True - ) - val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size) - test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size) - - # Set up optimizer and scheduler - optimizer = AdamW( - model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay - ) - - total_steps = len(train_dataloader) * args.num_epochs - warmup_steps = int(total_steps * args.warmup_ratio) - - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps - ) - - # Set up MLflow - mlflow.set_tracking_uri(args.mlflow_tracking_uri) - experiment_name = f"text_classification_{args.model_type}" - mlflow.set_experiment(experiment_name) - - # Start MLflow run - with mlflow.start_run( - run_name=f"{args.model_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}" 
- ): - # Log parameters - mlflow.log_params( - { - "model_type": args.model_type, - "model_name": model_name, - "num_epochs": args.num_epochs, - "batch_size": args.batch_size, - "learning_rate": args.learning_rate, - "weight_decay": args.weight_decay, - "warmup_ratio": args.warmup_ratio, - "max_seq_length": args.max_seq_length, - "seed": args.seed, - "device": str(device), - "num_labels": num_labels, - "train_samples": len(train_dataset), - "val_samples": len(val_dataset), - "test_samples": len(test_dataset), - } - ) + # Process the best model: Export to ONNX and upload to S3 + if ( + best_overall_model + and all_model_results[best_overall_model]["status"] == "success" + ): + try: + logger.info(f"\n{'=' * 60}") + logger.info(f"🔄 PROCESSING BEST MODEL: {best_overall_model.upper()}") + logger.info(f"{'=' * 60}") + + best_model_path = all_model_results[best_overall_model][ + "best_model_path" + ] + + # Step 1: Export to ONNX BEFORE uploading to S3 + logger.info("🔄 Exporting best model to ONNX format...") + onnx_path = convert_model_to_onnx(best_model_path) + + if onnx_path: + logger.info(f"✅ ONNX export completed: {onnx_path}") + # Update the model result to include ONNX info + all_model_results[best_overall_model]["onnx_model_path"] = onnx_path + else: + logger.warning( + "⚠️ ONNX export failed, proceeding without ONNX model" + ) + + # Step 2: Upload to S3 (now includes ONNX model) + logger.info("📦 Uploading best model to S3...") + s3_service = S3DatasetService() + + s3_model_path = s3_service.upload_trained_model( + model_dir=best_model_path, + model_id=args.model_id, + model_type=best_overall_model, + ) - # Training loop - start_time = time.time() - best_val_f1 = 0.0 - best_epoch = 0 + if s3_model_path: + logger.info(f"✅ Best model uploaded to S3: {s3_model_path}") + all_model_results[best_overall_model]["s3_model_path"] = ( + s3_model_path + ) + else: + logger.error("❌ Failed to upload best model to S3") - for epoch in range(args.num_epochs): - train_loss = train_epoch( - model, train_dataloader, optimizer, scheduler, device - ) + except Exception as e: + logger.error(f"❌ Failed to process best model: {e}") - mlflow.log_metric("train_loss", train_loss, step=epoch) + # Create comprehensive results summary + results_summary = create_results_summary( + all_model_results, best_overall_model, args.dataset_id, args.model_id + ) + + # Add S3 path to summary if available + if ( + best_overall_model + and "s3_model_path" in all_model_results[best_overall_model] + ): + results_summary["best_model_s3_path"] = all_model_results[ + best_overall_model + ]["s3_model_path"] + + # Add ONNX path to summary if available + if ( + best_overall_model + and "onnx_model_path" in all_model_results[best_overall_model] + ): + results_summary["best_model_onnx_path"] = all_model_results[ + best_overall_model + ]["onnx_model_path"] + + preprocessed_result_payload = preprocess_training_summary_from_dict( + results_summary + ) - # Evaluate on validation set - val_preds, val_labels, val_probs = evaluate( - model, val_dataloader, device, num_labels + # Save results summary + summary_path = os.path.join(main_output_dir, "training_summary.json") + with open(summary_path, "w", encoding="utf-8") as f: + json.dump(results_summary, f, indent=2, ensure_ascii=False) + + logger.info(f"\n{'=' * 60}") + logger.info("🏆 MULTI-MODEL TRAINING COMPLETED") + logger.info(f"Best Model: {best_overall_model}") + logger.info(f"Best F1 Score: {best_overall_score:.4f}") + logger.info(f"Results saved to: {summary_path}") + if ( + 
best_overall_model + and "s3_model_path" in all_model_results[best_overall_model] + ): + logger.info( + f"Model uploaded to S3: {all_model_results[best_overall_model]['s3_model_path']}" + ) + if ( + best_overall_model + and "onnx_model_path" in all_model_results[best_overall_model] + ): + logger.info( + f"ONNX model created: {all_model_results[best_overall_model]['onnx_model_path']}" ) + logger.info(f"{'=' * 60}") + + return preprocessed_result_payload + except Exception as e: + logger.error(f"❌ Critical error in train_multiple_models: {str(e)}") + raise # raise the exception so main() can handle it + + +def create_results_summary( + all_model_results: Dict[str, Dict], + best_overall_model: str, + dataset_id: str, + model_id: int, +) -> Dict[str, Any]: + """Create comprehensive results summary.""" + + # Calculate summary statistics + successful_models = [ + r for r in all_model_results.values() if r["status"] == "success" + ] + failed_models = [r for r in all_model_results.values() if r["status"] == "failed"] + + summary = { + "training_summary": { + "dataset_id": dataset_id, + "model_id": model_id, + "timestamp": datetime.now().isoformat(), + "total_models_attempted": len(all_model_results), + "successful_models": len(successful_models), + "failed_models": len(failed_models), + "best_overall_model": best_overall_model, + "best_overall_f1": all_model_results[best_overall_model]["test_metrics"][ + "f1" + ] + if best_overall_model + else 0.0, + }, + "model_results": {}, + "model_comparison": { + "metrics_comparison": {}, + "ranking_by_f1": [], + "ranking_by_accuracy": [], + }, + } - val_metrics, _ = compute_metrics(val_preds, val_labels, val_probs) + # Add individual model results + for model_type, result in all_model_results.items(): + summary["model_results"][model_type] = result + + # Create comparison metrics + if successful_models: + comparison_metrics = ["f1", "accuracy", "precision", "recall"] + for metric in comparison_metrics: + summary["model_comparison"]["metrics_comparison"][metric] = { + model_type: result["test_metrics"].get(metric, 0.0) + for model_type, result in all_model_results.items() + if result["status"] == "success" + } - for metric_name, metric_value in val_metrics.items(): - mlflow.log_metric(f"val_{metric_name}", metric_value, step=epoch) + # Create rankings + summary["model_comparison"]["ranking_by_f1"] = sorted( + [ + {"model_type": r["model_type"], "f1": r["test_metrics"]["f1"]} + for r in successful_models + ], + key=lambda x: x["f1"], + reverse=True, + ) - # Save best model - if val_metrics["f1"] > best_val_f1: - best_val_f1 = val_metrics["f1"] - best_epoch = epoch + summary["model_comparison"]["ranking_by_accuracy"] = sorted( + [ + { + "model_type": r["model_type"], + "accuracy": r["test_metrics"]["accuracy"], + } + for r in successful_models + ], + key=lambda x: x["accuracy"], + reverse=True, + ) + + return summary - # Save the model - model_dir = os.path.join( - args.output_dir, f"{args.model_type}_epoch_{epoch}" - ) - os.makedirs(model_dir, exist_ok=True) - model.save_pretrained(model_dir) - tokenizer.save_pretrained(model_dir) - - # Log the model - mlflow.pytorch.log_model( - model, - f"{args.model_type}_best_model", - registered_model_name=f"{args.model_type}_classifier", - ) - training_time = time.time() - start_time - mlflow.log_metric("training_time_seconds", training_time) +def main(): + logger.info("Starting multi-model training script") - # Load the best model for final evaluation - best_model_path = os.path.join( - args.output_dir, 
f"{args.model_type}_epoch_{best_epoch}" + try: + parser = argparse.ArgumentParser( + description="Train and evaluate multiple transformer-based text classifiers" ) - if os.path.exists(best_model_path): - model = AutoModelForSequenceClassification.from_pretrained(best_model_path) - model.to(device) - # Evaluate on test set - test_preds, test_labels, test_probs = evaluate( - model, test_dataloader, device, num_labels + # Updated argument for multiple models + parser.add_argument( + "--model_types", + type=str, + required=True, + help='JSON array of model types to train (e.g., \'["bert","roberta","xlm"]\')', ) - test_metrics, test_cm = compute_metrics(test_preds, test_labels, test_probs) + # Keep backward compatibility + parser.add_argument( + "--model_type", + type=str, + help="Single model type (for backward compatibility)", + ) - for metric_name, metric_value in test_metrics.items(): - print(f" {metric_name}: {metric_value:.4f}") - mlflow.log_metric(f"test_{metric_name}", metric_value) + # Model Id + parser.add_argument( + "--model_id", + type=int, + required=True, + help="Unique identifier for the model", + ) - # Log confusion matrix - log_confusion_matrix(test_cm) + # Job ID + parser.add_argument( + "--job_id", + type=int, + required=True, + help="Unique identifier for the training job", + ) - # Measure inference time - inference_time = measure_inference_time(model, test_dataloader, device) - mlflow.log_metric("avg_inference_time_seconds", inference_time) + # Dataset ID + parser.add_argument( + "--dataset_id", + type=str, + required=True, + help="Dataset ID for new format (e.g., '3'). Uses new dataset structure.", + ) - # Save metrics to a JSON file for easy access - metrics_path = os.path.join(args.output_dir, f"{args.model_type}_metrics.json") - output_metrics = { - "model_type": args.model_type, - "model_name": model_name, - "training_time_seconds": training_time, - "avg_inference_time_seconds": inference_time, - "test_metrics": test_metrics, - "best_epoch": best_epoch + 1, - "best_val_f1": best_val_f1, - } + # Data directory + parser.add_argument( + "--data_dir", + type=str, + default="data/processed", + help="Path to the directory containing the processed datasets", + ) - with open(metrics_path, "w") as f: - json.dump(output_metrics, f, indent=2) + # Output directory + parser.add_argument( + "--output_dir", + type=str, + required=True, + help="Path to save model checkpoints and outputs", + ) - # Log the metrics file - mlflow.log_artifact(metrics_path) + parser.add_argument( + "--model_name", + type=str, + help="Custom model name (when model_type is 'other')", + ) - return output_metrics + # Training parameters + parser.add_argument( + "--num_epochs", type=int, default=3, help="Number of training epochs" + ) + parser.add_argument("--batch_size", type=int, default=16, help="Batch size") + parser.add_argument( + "--learning_rate", type=float, default=2e-5, help="Learning rate" + ) + parser.add_argument( + "--weight_decay", type=float, default=0.01, help="Weight decay" + ) + parser.add_argument( + "--warmup_ratio", type=float, default=0.1, help="Warmup ratio" + ) + parser.add_argument( + "--max_seq_length", type=int, default=128, help="Maximum sequence length" + ) + parser.add_argument("--seed", type=int, default=42, help="Random seed") + parser.add_argument( + "--no_cuda", action="store_true", help="Don't use CUDA even if available" + ) + # MLflow parameters + parser.add_argument( + "--mlflow_tracking_uri", + type=str, + help="MLflow tracking URI", + ) -def main(): - parser = 
argparse.ArgumentParser( - description="Train and evaluate transformer-based text classifiers" - ) - - # Required parameters - parser.add_argument( - "--model_type", - type=str, - required=True, - choices=list(AVAILABLE_MODELS.keys()) + ["other"], - help="Type of model to use (bert, roberta, xlm, or other)", - ) - - # New dataset structure parameters - parser.add_argument( - "--dataset_id", - type=str, - help="Dataset ID for new format (e.g., '3'). If provided, uses new dataset structure.", - ) - parser.add_argument( - "--data_dir", - type=str, - default="data/processed", - help="Path to the directory containing the processed datasets", - ) - - # Legacy parameter for backward compatibility - parser.add_argument( - "--legacy_data_dir", - type=str, - help="Path to legacy data directory (for old format)", - ) - - parser.add_argument( - "--output_dir", - type=str, - required=True, - help="Path to save model checkpoints and outputs", - ) - - # Optional parameters - parser.add_argument( - "--model_name", - type=str, - default=None, - help="Full model name (if not using a predefined type)", - ) - parser.add_argument( - "--mlflow_tracking_uri", - type=str, - default="mlruns", - help="URI for MLflow tracking server", - ) - parser.add_argument( - "--num_epochs", type=int, default=5, help="Number of training epochs" - ) - parser.add_argument( - "--batch_size", - type=int, - default=16, - help="Batch size for training and evaluation", - ) - parser.add_argument( - "--learning_rate", type=float, default=2e-5, help="Learning rate" - ) - parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay") - parser.add_argument( - "--warmup_ratio", - type=float, - default=0.1, - help="Ratio of training steps for LR warmup", - ) - parser.add_argument( - "--max_seq_length", - type=int, - default=128, - help="Maximum sequence length for tokenization", - ) - parser.add_argument( - "--seed", type=int, default=42, help="Random seed for reproducibility" - ) - parser.add_argument( - "--no_cuda", action="store_true", help="Disable CUDA even if available" - ) - - args = parser.parse_args() - - # Create output directory if it doesn't exist - os.makedirs(args.output_dir, exist_ok=True) - - # Update model_name if provided - if args.model_name and args.model_type == "other": - AVAILABLE_MODELS["other"] = args.model_name - - # Choose training function based on dataset format - if args.dataset_id: - # Use new dataset structure - print(f"Using new dataset structure with dataset ID: {args.dataset_id}") - train_and_evaluate_new_format(args) - elif args.legacy_data_dir: - # Use legacy format - print("Using legacy dataset structure") - args.data_dir = args.legacy_data_dir # Set for legacy function - train_and_evaluate(args) - else: - raise ValueError("Either --dataset_id (for new format) or --legacy_data_dir (for old format) must be provided") + args = parser.parse_args() + + # Set random seeds + set_random_seeds(args.seed) + + # Create output directory + os.makedirs(args.output_dir, exist_ok=True) + + # Set MLflow tracking URI + if args.mlflow_tracking_uri: + mlflow.set_tracking_uri(args.mlflow_tracking_uri) + + # Update model_name if provided + if ( + args.model_name + and hasattr(args, "model_type") + and args.model_type == "other" + ): + MODEL_CONFIG["other"] = {"name": args.model_name, "max_length": 128} + + # Train multiple models + preprocessed_result_payload = train_multiple_models(args) + + # Check if training was successful + if not preprocessed_result_payload or not preprocessed_result_payload.get( + 
"models_performance" + ): + logger.error("❌ Training failed: No valid results produced") + sys.exit(1) + + # Check if at least one model was successfully trained + successful_models = [ + model + for model in preprocessed_result_payload["models_performance"] + if model.get("status") == "success" + ] + + if not successful_models: + logger.error("❌ Training failed: No models were successfully trained") + sys.exit(1) + + # ======================TO DO: training results in DB======================== + logger.info(f"Preprocessed result payload: {preprocessed_result_payload}") + # =========================================================================== + + # Update job status to trained + job_status = update_job_status(job_id=args.job_id, status="trained") + + if not job_status: + logger.error( + f"Failed to update job status for job ID: {args.job_id} || Training pipeline may not be complete." + ) + else: + logger.info(f"Job status updated successfully for job ID: {args.job_id}") + logger.info("🎉 Multi-model training pipeline completed successfully!") + sys.exit(0) # Explicit success exit + except Exception as e: + # Update job status to failed + # update_job_status(job_id=args.job_id, status="failed") + logger.error(f"❌ Training failed with error: {str(e)}") + logger.error("Full traceback:", exc_info=True) + sys.exit(1) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/training/scripts/train_v1.py b/src/training/scripts/train_v1.py deleted file mode 100644 index b259968c..00000000 --- a/src/training/scripts/train_v1.py +++ /dev/null @@ -1,582 +0,0 @@ -import os -import argparse -import time -import json -import random -import numpy as np -import pandas as pd -from datetime import datetime - -import torch -import torch.nn as nn -from torch.utils.data import Dataset, DataLoader -from torch.optim import AdamW - -from transformers import ( - AutoTokenizer, - AutoModelForSequenceClassification, - get_linear_schedule_with_warmup, - set_seed, -) - -from sklearn.metrics import ( - accuracy_score, - precision_recall_fscore_support, - roc_auc_score, - confusion_matrix, - roc_curve, -) - -import mlflow -import matplotlib.pyplot as plt -import seaborn as sns - -# Define constants and configurations -AVAILABLE_MODELS = { - "bert": "bert-base-uncased", - "roberta": "roberta-base", - "xlm": "xlm-roberta-base", -} - - -class TextClassificationDataset(Dataset): - - def __init__(self, texts, labels, tokenizer, max_length=128): - self.texts = texts - self.labels = labels - self.tokenizer = tokenizer - self.max_length = max_length - - def __len__(self): - return len(self.texts) - - def __getitem__(self, idx): - text = str(self.texts[idx]) - label = self.labels[idx] - - encoding = self.tokenizer( - text, - add_special_tokens=True, - max_length=self.max_length, - padding="max_length", - truncation=True, - return_attention_mask=True, - return_tensors="pt", - ) - - return { - "input_ids": encoding["input_ids"].flatten(), - "attention_mask": encoding["attention_mask"].flatten(), - "label": torch.tensor(label, dtype=torch.long), - } - - -def set_random_seeds(seed_val=42): - random.seed(seed_val) - np.random.seed(seed_val) - torch.manual_seed(seed_val) - torch.cuda.manual_seed_all(seed_val) - set_seed(seed_val) - - -def load_data(data_path, split=None): - """ - Load the data from the given path. 
- - Args: - data_path: Path to the data file (CSV or JSON) - split: Train/val/test split to load - - Returns: - texts, labels - """ - if split: - data_path = os.path.join(data_path, f"{split}.csv") - - if data_path.endswith(".csv"): - df = pd.read_csv(data_path) - elif data_path.endswith(".json"): - df = pd.read_json(data_path, lines=True) - else: - raise ValueError(f"Unsupported file format: {data_path}") - - text_col = "text" if "text" in df.columns else "content" - - # Check for 'agency' column (our case) or fall back to 'label' or 'class' - if "agency" in df.columns: - label_col = "agency" - elif "label" in df.columns: - label_col = "label" - elif "class" in df.columns: - label_col = "class" - else: - raise ValueError( - "No suitable label column found in the data. Need 'agency', 'label', or 'class'." - ) - - # If labels are strings (e.g., agency names), convert to integers - if df[label_col].dtype == "object": - # Create a mapping of agency names to integers - unique_labels = df[label_col].unique() - label_map = {label: i for i, label in enumerate(unique_labels)} - - # Save the mapping for later reference - mapping_file = os.path.join(os.path.dirname(data_path), "label_mapping.json") - os.makedirs(os.path.dirname(mapping_file), exist_ok=True) - with open(mapping_file, "w") as f: - json.dump(label_map, f, indent=2) - - # Convert string labels to integers - labels = df[label_col].map(label_map).values - else: - labels = df[label_col].values - - return df[text_col].values, labels - - -def compute_metrics(preds, labels, probs=None): - """ - Compute various classification metrics. - - Args: - preds: Predicted labels - labels: True labels - probs: Prediction probabilities for ROC/PR curves - - Returns: - Dictionary of metrics - """ - precision, recall, f1, _ = precision_recall_fscore_support( - labels, preds, average="weighted" - ) - - acc = accuracy_score(labels, preds) - - metrics = { - "accuracy": acc, - "precision": precision, - "recall": recall, - "f1": f1, - } - - # Compute confusion matrix - cm = confusion_matrix(labels, preds) - - # Calculate class-wise metrics - class_precision, class_recall, class_f1, _ = precision_recall_fscore_support( - labels, preds, average=None - ) - - # Add class metrics to the return dict - for i, (p, r, f) in enumerate(zip(class_precision, class_recall, class_f1)): - metrics[f"class_{i}_precision"] = p - metrics[f"class_{i}_recall"] = r - metrics[f"class_{i}_f1"] = f - - # Compute ROC AUC if probabilities are provided and it's a binary task - if probs is not None: - if probs.shape[1] == 2: # Binary classification - metrics["roc_auc"] = roc_auc_score(labels, probs[:, 1]) - # Compute FPR@95TPR - fpr, tpr, thresholds = roc_curve(labels, probs[:, 1]) - if any(tpr >= 0.95): - idx = np.argmin(np.abs(tpr - 0.95)) - metrics["fpr@95tpr"] = fpr[idx] - else: # Multi-class - try: - # One-hot encode the labels for multi-class ROC AUC - labels_one_hot = np.zeros((len(labels), probs.shape[1])) - for i, label in enumerate(labels): - labels_one_hot[i, label] = 1 - - metrics["roc_auc"] = roc_auc_score( - labels_one_hot, probs, average="weighted", multi_class="ovr" - ) - except Exception as e: - print(f"Warning: Could not compute ROC AUC for multi-class: {str(e)}") - - return metrics, cm - - -def log_confusion_matrix(cm, class_names=None): - """ - Create and log a confusion matrix figure to MLflow. 
- - Args: - cm: Confusion matrix - class_names: Names of the classes - """ - if class_names is None: - class_names = [f"Class {i}" for i in range(cm.shape[0])] - - plt.figure(figsize=(10, 8)) - sns.heatmap( - cm, - annot=True, - fmt="d", - cmap="Blues", - xticklabels=class_names, - yticklabels=class_names, - ) - plt.xlabel("Predicted") - plt.ylabel("True") - plt.title("Confusion Matrix") - - # Save the figure - confusion_matrix_path = "confusion_matrix.png" - plt.tight_layout() - plt.savefig(confusion_matrix_path) - plt.close() - - # Log the figure to MLflow - mlflow.log_artifact(confusion_matrix_path) - - # Clean up the file - os.remove(confusion_matrix_path) - - -def train_epoch(model, dataloader, optimizer, scheduler, device): - """Run a single training epoch.""" - model.train() - total_loss = 0 - - for batch in dataloader: - optimizer.zero_grad() - - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) - labels = batch["label"].to(device) - - outputs = model( - input_ids=input_ids, attention_mask=attention_mask, labels=labels - ) - - loss = outputs.loss - total_loss += loss.item() - - loss.backward() - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) - optimizer.step() - scheduler.step() - - return total_loss / len(dataloader) - - -def evaluate(model, dataloader, device, num_labels): - """Evaluate the model on the given dataloader.""" - model.eval() - all_preds = [] - all_labels = [] - all_probs = [] - - with torch.no_grad(): - for batch in dataloader: - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) - labels = batch["label"].to(device) - - outputs = model(input_ids=input_ids, attention_mask=attention_mask) - - logits = outputs.logits - - # Convert logits to probabilities - probs = torch.nn.functional.softmax(logits, dim=1) - - # Get predicted class (argmax) - preds = torch.argmax(logits, dim=1) - - all_preds.extend(preds.cpu().numpy()) - all_labels.extend(labels.cpu().numpy()) - all_probs.extend(probs.cpu().numpy()) - - return np.array(all_preds), np.array(all_labels), np.array(all_probs) - - -def measure_inference_time(model, dataloader, device, num_runs=100): - """Measure the average inference time.""" - model.eval() - batch = next(iter(dataloader)) - - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) - - # Warm-up - for _ in range(10): - with torch.no_grad(): - _ = model(input_ids=input_ids, attention_mask=attention_mask) - - # Measure inference time - start_time = time.time() - for _ in range(num_runs): - with torch.no_grad(): - _ = model(input_ids=input_ids, attention_mask=attention_mask) - end_time = time.time() - - avg_time = (end_time - start_time) / num_runs - return avg_time - - -def train_and_evaluate(args): - """Main training and evaluation function.""" - # Set up random seeds for reproducibility - set_random_seeds(args.seed) - - # Set up device - device = torch.device( - "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" - ) - - # Load data - train_texts, train_labels = load_data(os.path.join(args.data_dir, "train.csv")) - val_texts, val_labels = load_data(os.path.join(args.data_dir, "val.csv")) - test_texts, test_labels = load_data(os.path.join(args.data_dir, "test.csv")) - - # Determine number of classes - num_labels = len(np.unique(train_labels)) - - # Set up model name - model_name = AVAILABLE_MODELS.get(args.model_type, args.model_type) - - # Set up tokenizer and model - tokenizer = 
AutoTokenizer.from_pretrained(model_name) - model = AutoModelForSequenceClassification.from_pretrained( - model_name, num_labels=num_labels - ) - model.to(device) - - # Create datasets - train_dataset = TextClassificationDataset( - train_texts, train_labels, tokenizer, max_length=args.max_seq_length - ) - val_dataset = TextClassificationDataset( - val_texts, val_labels, tokenizer, max_length=args.max_seq_length - ) - test_dataset = TextClassificationDataset( - test_texts, test_labels, tokenizer, max_length=args.max_seq_length - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=args.batch_size, shuffle=True - ) - val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size) - test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size) - - # Set up optimizer and scheduler - optimizer = AdamW( - model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay - ) - - total_steps = len(train_dataloader) * args.num_epochs - warmup_steps = int(total_steps * args.warmup_ratio) - - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps - ) - - # Set up MLflow - mlflow.set_tracking_uri(args.mlflow_tracking_uri) - experiment_name = f"text_classification_{args.model_type}" - mlflow.set_experiment(experiment_name) - - # Start MLflow run - with mlflow.start_run( - run_name=f"{args.model_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}" - ): - # Log parameters - mlflow.log_params( - { - "model_type": args.model_type, - "model_name": model_name, - "num_epochs": args.num_epochs, - "batch_size": args.batch_size, - "learning_rate": args.learning_rate, - "weight_decay": args.weight_decay, - "warmup_ratio": args.warmup_ratio, - "max_seq_length": args.max_seq_length, - "seed": args.seed, - "device": str(device), - "num_labels": num_labels, - "train_samples": len(train_dataset), - "val_samples": len(val_dataset), - "test_samples": len(test_dataset), - } - ) - - # Training loop - start_time = time.time() - - best_val_f1 = 0.0 - best_epoch = 0 - - for epoch in range(args.num_epochs): - - train_loss = train_epoch( - model, train_dataloader, optimizer, scheduler, device - ) - - mlflow.log_metric("train_loss", train_loss, step=epoch) - - # Evaluate on validation set - val_preds, val_labels, val_probs = evaluate( - model, val_dataloader, device, num_labels - ) - - val_metrics, _ = compute_metrics(val_preds, val_labels, val_probs) - - for metric_name, metric_value in val_metrics.items(): - mlflow.log_metric(f"val_{metric_name}", metric_value, step=epoch) - - # Save best model - if val_metrics["f1"] > best_val_f1: - best_val_f1 = val_metrics["f1"] - best_epoch = epoch - - # Save the model - model_dir = os.path.join( - args.output_dir, f"{args.model_type}_epoch_{epoch}" - ) - os.makedirs(model_dir, exist_ok=True) - model.save_pretrained(model_dir) - tokenizer.save_pretrained(model_dir) - - # Log the model - mlflow.pytorch.log_model( - model, - f"{args.model_type}_best_model", - registered_model_name=f"{args.model_type}_classifier", - ) - - training_time = time.time() - start_time - mlflow.log_metric("training_time_seconds", training_time) - - # Load the best model for final evaluation - best_model_path = os.path.join( - args.output_dir, f"{args.model_type}_epoch_{best_epoch}" - ) - if os.path.exists(best_model_path): - model = AutoModelForSequenceClassification.from_pretrained(best_model_path) - model.to(device) - - # Evaluate on test set - test_preds, test_labels, test_probs = evaluate( - 
model, test_dataloader, device, num_labels - ) - - test_metrics, test_cm = compute_metrics(test_preds, test_labels, test_probs) - - for metric_name, metric_value in test_metrics.items(): - print(f" {metric_name}: {metric_value:.4f}") - mlflow.log_metric(f"test_{metric_name}", metric_value) - - # Log confusion matrix - log_confusion_matrix(test_cm) - - # Measure inference time - inference_time = measure_inference_time(model, test_dataloader, device) - mlflow.log_metric("avg_inference_time_seconds", inference_time) - - # Save metrics to a JSON file for easy access - metrics_path = os.path.join(args.output_dir, f"{args.model_type}_metrics.json") - output_metrics = { - "model_type": args.model_type, - "model_name": model_name, - "training_time_seconds": training_time, - "avg_inference_time_seconds": inference_time, - "test_metrics": test_metrics, - "best_epoch": best_epoch + 1, - "best_val_f1": best_val_f1, - } - - with open(metrics_path, "w") as f: - json.dump(output_metrics, f, indent=2) - - # Log the metrics file - mlflow.log_artifact(metrics_path) - - -def main(): - parser = argparse.ArgumentParser( - description="Train and evaluate transformer-based text classifiers" - ) - - # Required parameters - parser.add_argument( - "--model_type", - type=str, - required=True, - choices=list(AVAILABLE_MODELS.keys()) + ["other"], - help="Type of model to use (bert, roberta, xlm, or other)", - ) - parser.add_argument( - "--data_dir", - type=str, - required=True, - help="Path to the directory containing the data files", - ) - parser.add_argument( - "--output_dir", - type=str, - required=True, - help="Path to save model checkpoints and outputs", - ) - - # Optional parameters - parser.add_argument( - "--model_name", - type=str, - default=None, - help="Full model name (if not using a predefined type)", - ) - parser.add_argument( - "--mlflow_tracking_uri", - type=str, - default="mlruns", - help="URI for MLflow tracking server", - ) - parser.add_argument( - "--num_epochs", type=int, default=5, help="Number of training epochs" - ) - parser.add_argument( - "--batch_size", - type=int, - default=16, - help="Batch size for training and evaluation", - ) - parser.add_argument( - "--learning_rate", type=float, default=2e-5, help="Learning rate" - ) - parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay") - parser.add_argument( - "--warmup_ratio", - type=float, - default=0.1, - help="Ratio of training steps for LR warmup", - ) - parser.add_argument( - "--max_seq_length", - type=int, - default=128, - help="Maximum sequence length for tokenization", - ) - parser.add_argument( - "--seed", type=int, default=42, help="Random seed for reproducibility" - ) - parser.add_argument( - "--no_cuda", action="store_true", help="Disable CUDA even if available" - ) - - args = parser.parse_args() - - # Create output directory if it doesn't exist - os.makedirs(args.output_dir, exist_ok=True) - - # Update model_name if provided - if args.model_name and args.model_type == "other": - AVAILABLE_MODELS["other"] = args.model_name - - train_and_evaluate(args) - - -if __name__ == "__main__": - main() diff --git a/src/training/scripts/utils.py b/src/training/scripts/utils.py index 38ec5538..13a8f4c2 100644 --- a/src/training/scripts/utils.py +++ b/src/training/scripts/utils.py @@ -1,641 +1,418 @@ -import os +from sklearn.metrics import confusion_matrix +import seaborn as sns +import matplotlib.pyplot as plt import random +import time +import torch +from transformers import set_seed +import mlflow import numpy as np 
-import pandas as pd -import matplotlib.pyplot as plt -import seaborn as sns -from sklearn.model_selection import train_test_split +import requests +import os +import sys +from loguru import logger +from typing import Dict, Any from sklearn.metrics import ( accuracy_score, precision_recall_fscore_support, - confusion_matrix, - roc_curve, - precision_recall_curve, - auc, - roc_auc_score, - average_precision_score, + roc_auc_score +) +from scripts.constants import ( + LOG_DIRECTORY, + LOG_FORMAT, + LOG_FILE_NAME, + ROTATION_SIZE, + RETENTION_PERIOD, + LOG_FILE_HANDLER_FORMAT, + TRAINING_JOB_STATUS_UPDATE_URL, + SEED, ) -import torch -from transformers import set_seed -import mlflow -import json -import time -from datetime import datetime -from constants import MODEL_CONFIG - - -def set_random_seeds(seed_val=42): - """ - Set random seeds for reproducibility across Python, NumPy and PyTorch. - - Args: - seed_val (int): The seed value to use - """ - random.seed(seed_val) - np.random.seed(seed_val) - torch.manual_seed(seed_val) - torch.cuda.manual_seed_all(seed_val) - set_seed(seed_val) - # Set deterministic behavior for reproducibility - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def preprocess_data(data_path, text_col="text", label_col="label", save_path=None): - """ - Preprocess the data from a CSV or JSON file. - - Args: - data_path (str): Path to the data file - text_col (str): Name of the column containing the text - label_col (str): Name of the column containing the labels - save_path (str, optional): Path to save the preprocessed data - - Returns: - pd.DataFrame: Preprocessed dataframe - """ - # Load the data - if data_path.endswith(".csv"): - df = pd.read_csv(data_path) - elif data_path.endswith(".json"): - df = pd.read_json(data_path, lines=True) - else: - raise ValueError(f"Unsupported file format: {data_path}") - - # Verify columns exist - if text_col not in df.columns: - raise ValueError(f"Text column '{text_col}' not found in data") - if label_col not in df.columns: - raise ValueError(f"Label column '{label_col}' not found in data") - - # Basic preprocessing - df = df.dropna(subset=[text_col, label_col]) - - # Ensure labels are numeric - if not pd.api.types.is_numeric_dtype(df[label_col]): - # If labels are strings, convert to integers - label_map = {label: i for i, label in enumerate(df[label_col].unique())} - df[label_col] = df[label_col].map(label_map) - - # Save the label mapping for reference - if save_path: - label_map_path = os.path.join(os.path.dirname(save_path), "label_map.json") - with open(label_map_path, "w") as f: - json.dump(label_map, f, indent=2) - - # Save preprocessed data if a path is provided - if save_path: - os.makedirs(os.path.dirname(save_path), exist_ok=True) - if save_path.endswith(".csv"): - df.to_csv(save_path, index=False) - elif save_path.endswith(".json"): - df.to_json(save_path, orient="records", lines=True) - - return df - - -def split_data( - df, - text_col="text", - label_col="label", - test_size=0.2, - val_size=0.1, - stratify=True, - random_state=42, - save_dir=None, -): - """ - Split the data into train, validation, and test sets. 
- """ - actual_val_size = val_size / (1 - test_size) - - stratify_col = df[label_col] if stratify else None - train_val_df, test_df = train_test_split( - df, test_size=test_size, random_state=random_state, stratify=stratify_col - ) - stratify_col = train_val_df[label_col] if stratify else None - train_df, val_df = train_test_split( - train_val_df, - test_size=actual_val_size, - random_state=random_state, - stratify=stratify_col, - ) +os.makedirs(LOG_DIRECTORY, exist_ok=True) - if save_dir: - os.makedirs(save_dir, exist_ok=True) - train_df.to_csv(os.path.join(save_dir, "train.csv"), index=False) - val_df.to_csv(os.path.join(save_dir, "val.csv"), index=False) - test_df.to_csv(os.path.join(save_dir, "test.csv"), index=False) - - split_info = { - "total_samples": len(df), - "train_samples": len(train_df), - "val_samples": len(val_df), - "test_samples": len(test_df), - "test_size": test_size, - "val_size": val_size, - "stratify": stratify, - "random_state": random_state, - "date_created": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), - } +# Remove default handler and add custom ones +logger.remove() - with open(os.path.join(save_dir, "split_info.json"), "w") as f: - json.dump(split_info, f, indent=2) +# Add console handler for immediate feedback +logger.add( + sys.stderr, + level="DEBUG", + format=LOG_FORMAT, + colorize=True, +) - return train_df, val_df, test_df +# Add file handler +logger.add( + sink=os.path.join(LOG_DIRECTORY, LOG_FILE_NAME), + level="DEBUG", + rotation=ROTATION_SIZE, + retention=RETENTION_PERIOD, + backtrace=True, + diagnose=True, + format=LOG_FILE_HANDLER_FORMAT, +) -def compute_metrics(y_true, y_pred, y_proba=None, average="weighted"): +def log_confusion_matrix(cm, class_names=None): """ - Compute classification metrics. - + Create and log a confusion matrix figure to MLflow. 
""" - metrics = {} - - # Basic classification metrics - metrics["accuracy"] = accuracy_score(y_true, y_pred) - - precision, recall, f1, _ = precision_recall_fscore_support( - y_true, y_pred, average=average - ) + plt.figure(figsize=(10, 8)) + if class_names: + sns.heatmap( + cm, + annot=True, + fmt="d", + cmap="Blues", + xticklabels=class_names, + yticklabels=class_names, + ) + else: + sns.heatmap(cm, annot=True, fmt="d", cmap="Blues") - metrics["precision"] = precision - metrics["recall"] = recall - metrics["f1"] = f1 + plt.title("Confusion Matrix") + plt.ylabel("True Label") + plt.xlabel("Predicted Label") + plt.tight_layout() - # Confusion matrix - cm = confusion_matrix(y_true, y_pred) - metrics["confusion_matrix"] = cm + # Log to MLflow + mlflow.log_figure(plt.gcf(), "confusion_matrix.png") + plt.close() - # Calculate per-class metrics - class_precision, class_recall, class_f1, _ = precision_recall_fscore_support( - y_true, y_pred, average=None - ) - # Add class metrics - num_classes = len(np.unique(y_true)) - for i in range(num_classes): - metrics[f"class_{i}_precision"] = class_precision[i] - metrics[f"class_{i}_recall"] = class_recall[i] - metrics[f"class_{i}_f1"] = class_f1[i] - - # ROC and PR curve metrics if probabilities are provided - if y_proba is not None: - # For binary classification - if num_classes == 2 and y_proba.shape[1] == 2: - fpr, tpr, _ = roc_curve(y_true, y_proba[:, 1]) - metrics["roc_auc"] = auc(fpr, tpr) - - # Find the FPR at 95% TPR - if any(tpr >= 0.95): - idx_95 = next(i for i, x in enumerate(tpr) if x >= 0.95) - metrics["fpr@95tpr"] = fpr[idx_95] - else: - metrics["fpr@95tpr"] = float("nan") - - # Precision-Recall AUC - precision, recall, _ = precision_recall_curve(y_true, y_proba[:, 1]) - metrics["pr_auc"] = auc(recall, precision) - - # For multi-class classification - else: - try: - # One-hot encode the labels - y_true_onehot = np.zeros((len(y_true), num_classes)) - for i, val in enumerate(y_true): - y_true_onehot[i, val] = 1 - - # Calculate ROC AUC for multi-class - metrics["roc_auc"] = roc_auc_score( - y_true_onehot, y_proba, average=average, multi_class="ovr" - ) - - # Calculate average precision score - metrics["pr_auc"] = average_precision_score( - y_true_onehot, y_proba, average=average - ) - except Exception as e: - print(f"Warning: Could not compute ROC/PR metrics: {str(e)}") - - return metrics - - -def plot_confusion_matrix( - cm, - class_names=None, - normalize=False, - title="Confusion Matrix", - save_path=None, - figsize=(10, 8), - cmap="Blues", -): +def log_confusion_matrix_with_names(cm, class_names): """ - Plot a confusion matrix. - + Create and log a confusion matrix figure with class names to MLflow. 
""" - if normalize: - cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] - fmt = ".2f" - else: - fmt = "d" - - if class_names is None: - class_names = [f"Class {i}" for i in range(cm.shape[0])] + plt.figure(figsize=(12, 10)) - plt.figure(figsize=figsize) + # Create heatmap sns.heatmap( cm, annot=True, - fmt=fmt, - cmap=cmap, + fmt="d", + cmap="Blues", xticklabels=class_names, yticklabels=class_names, + cbar_kws={"label": "Count"}, ) - plt.xlabel("Predicted") - plt.ylabel("True") - plt.title(title) - plt.tight_layout() - - if save_path: - plt.savefig(save_path) - - return plt.gcf() + plt.title("Confusion Matrix", fontsize=16) + plt.ylabel("True Label", fontsize=14) + plt.xlabel("Predicted Label", fontsize=14) -def plot_roc_curve(fpr, tpr, roc_auc, save_path=None, figsize=(8, 6)): - """ - Plot a ROC curve - """ - plt.figure(figsize=figsize) - plt.plot(fpr, tpr, lw=2, label=f"ROC curve (area = {roc_auc:.2f})") - plt.plot([0, 1], [0, 1], "k--", lw=2) - plt.xlim([0.0, 1.0]) - plt.ylim([0.0, 1.05]) - plt.xlabel("False Positive Rate") - plt.ylabel("True Positive Rate") - plt.title("Receiver Operating Characteristic (ROC) Curve") - plt.legend(loc="lower right") + # Rotate labels for better readability + plt.xticks(rotation=45, ha="right") + plt.yticks(rotation=0) - if save_path: - plt.savefig(save_path) + plt.tight_layout() - return plt.gcf() + # Log to MLflow + mlflow.log_figure(plt.gcf(), "confusion_matrix_with_names.png") + plt.close() -def plot_precision_recall_curve( - precision, recall, average_precision, save_path=None, figsize=(8, 6) -): +def compute_metrics(predictions, labels, probs, class_names=None): """ - Plot a precision-recall curve + Compute comprehensive metrics including per-class accuracy with class names. + + Args: + predictions: Model predictions + labels: True labels + probs: Prediction probabilities + class_names: List of class names (optional) """ - plt.figure(figsize=figsize) - plt.plot(recall, precision, lw=2, label=f"PR curve (AP = {average_precision:.2f})") - plt.xlim([0.0, 1.0]) - plt.ylim([0.0, 1.05]) - plt.xlabel("Recall") - plt.ylabel("Precision") - plt.title("Precision-Recall Curve") - plt.legend(loc="lower left") - if save_path: - plt.savefig(save_path) + # Overall metrics + accuracy = accuracy_score(labels, predictions) + precision, recall, f1, _ = precision_recall_fscore_support( + labels, predictions, average="macro" + ) - return plt.gcf() + # Per-class metrics + precision_per_class, recall_per_class, f1_per_class, _ = ( + precision_recall_fscore_support(labels, predictions, average=None) + ) + # Calculate per-class accuracy + cm = confusion_matrix(labels, predictions) + per_class_accuracy = cm.diagonal() / cm.sum(axis=1) -def log_plots_to_mlflow(metrics, class_names=None, output_dir=None): - """ - Create and log plots to MLflow based on computed metrics - """ - if output_dir: - os.makedirs(output_dir, exist_ok=True) - - # Plot confusion matrix - if "confusion_matrix" in metrics: - cm = metrics["confusion_matrix"] - cm_path = ( - os.path.join(output_dir, "confusion_matrix.png") if output_dir else None - ) - plot_confusion_matrix(cm, class_names=class_names, save_path=cm_path) + # Build metrics dictionary - FLAT structure with meaningful names + metrics = { + "accuracy": accuracy, + "precision": precision, + "recall": recall, + "f1": f1, + } - if cm_path: - mlflow.log_artifact(cm_path) + # Add per-class metrics directly to main dictionary (no nesting) + for i in range(len(precision_per_class)): + if class_names and i < len(class_names): + # Use actual class 
name + class_key = class_names[i].replace(" ", "_").lower() + else: + # Fallback to class_N format + class_key = f"class_{i}" + + # Add metrics directly to main dictionary + metrics[f"{class_key}_precision"] = precision_per_class[i] + metrics[f"{class_key}_recall"] = recall_per_class[i] + metrics[f"{class_key}_f1"] = f1_per_class[i] + metrics[f"{class_key}_accuracy"] = per_class_accuracy[i] + + # ROC AUC for binary/multiclass + if len(np.unique(labels)) == 2: + metrics["roc_auc"] = roc_auc_score(labels, probs[:, 1]) + else: + metrics["roc_auc"] = roc_auc_score(labels, probs, multi_class="ovr") - # Plot ROC curve for binary classification - if "roc_curve" in metrics: - fpr = metrics["roc_curve"]["fpr"] - tpr = metrics["roc_curve"]["tpr"] - roc_auc = metrics["roc_auc"] + return metrics, cm - roc_path = os.path.join(output_dir, "roc_curve.png") if output_dir else None - plot_roc_curve(fpr, tpr, roc_auc, save_path=roc_path) - if roc_path: - mlflow.log_artifact(roc_path) +def update_job_status(job_id: int, status: str) -> bool: + """Update training job status in database.""" + try: + payload = {"jobId": job_id, "jobStatus": status} - # Plot precision-recall curve - if "pr_curve" in metrics: - precision = metrics["pr_curve"]["precision"] - recall = metrics["pr_curve"]["recall"] - avg_precision = metrics["pr_auc"] + response = requests.post( + TRAINING_JOB_STATUS_UPDATE_URL, + json=payload, + headers={"Content-Type": "application/json"}, + timeout=30, + ) - pr_path = os.path.join(output_dir, "pr_curve.png") if output_dir else None - plot_precision_recall_curve(precision, recall, avg_precision, save_path=pr_path) + if response.status_code == 200: + logger.info(f"✅ Job status updated to '{status}' for job ID: {job_id}") + return True + else: + logger.error(f"Failed to update job status: HTTP {response.status_code}") + return False - if pr_path: - mlflow.log_artifact(pr_path) + except Exception as e: + logger.error(f"Error updating job status: {str(e)}") + return False -def measure_model_size(model): +def preprocess_training_summary_from_dict( + training_summary_dict: Dict[str, Any], +) -> Dict[str, Any]: """ - Measure the size of a PyTorch model in MB. + Efficiently preprocess training_summary dictionary to create a sorted payload. + Use this when you already have the data loaded as a dictionary. 
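How the two utilities fit together: compute_metrics above flattens per-class scores into keys such as <class>_f1, and the function below recovers the class name with rsplit("_", 1). A small illustration with made-up class names:

# Illustrative only: how per-class metric keys are built, then split back apart.
for cname in ["police and border guard", "tax and customs"]:
    class_key = cname.replace(" ", "_").lower()  # e.g. "police_and_border_guard"
    flat_key = f"{class_key}_f1"                 # key emitted by compute_metrics
    class_name, metric_type = flat_key.rsplit("_", 1)
    assert (class_name, metric_type) == (class_key, "f1")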
Args:
-        model (torch.nn.Module): The model to measure
+        training_summary_dict: Training summary data as a dictionary

    Returns:
-        float: Size of the model in MB
+        Dictionary with the preprocessed payload, sorted by F1 score
    """
-    model_size = 0
-    for param in model.parameters():
-        model_size += param.nelement() * param.element_size()
+    try:
+        # Extract basic info
+        training_summary = training_summary_dict.get("training_summary", {})
+        model_results = training_summary_dict.get("model_results", {})
+
+        # Build the per-model performance list
+        models_performance = []
+
+        for model_type, result in model_results.items():
+            if result.get("status") == "success":
+                test_metrics = result.get("test_metrics", {})
+
+                # Extract overall metrics (non-class specific)
+                overall_metrics = {
+                    "accuracy": test_metrics.get("accuracy", 0.0),
+                    "precision": test_metrics.get("precision", 0.0),
+                    "recall": test_metrics.get("recall", 0.0),
+                    "f1": test_metrics.get("f1", 0.0),
+                    "roc_auc": test_metrics.get("roc_auc", 0.0),
+                }
+
+                # Collect the flat per-class metric keys, then regroup them by class
+                class_metrics = {}
+                class_metric_items = [
+                    (k, v)
+                    for k, v in test_metrics.items()
+                    if "_" in k
+                    and k.endswith(("_precision", "_recall", "_f1", "_accuracy"))
+                ]
+
+                for key, value in class_metric_items:
+                    parts = key.rsplit("_", 1)
+                    if len(parts) == 2:
+                        class_name, metric_type = parts
+                        if class_name not in class_metrics:
+                            class_metrics[class_name] = {}
+                        class_metrics[class_name][metric_type] = value
+
+                # Build model performance entry
+                model_entry = {
+                    "model_type": model_type,
+                    "model_name": result.get("model_name", ""),
+                    "status": result.get("status", "success"),
+                    "best_epoch": result.get("best_epoch", 0),
+                    "best_val_f1": result.get("best_val_f1", 0.0),
+                    "training_time_seconds": result.get("training_time_seconds", 0.0),
+                    "inference_time_seconds": result.get("inference_time_seconds", 0.0),
+                    "num_parameters": result.get("num_parameters", 0),
+                    "overall_metrics": overall_metrics,
+                    "class_metrics": class_metrics,
+                }
+
+                models_performance.append(model_entry)
+
+        # Sort by F1 score (descending)
+        models_performance.sort(key=lambda x: x["overall_metrics"]["f1"], reverse=True)
+
+        # Get best model info (first in sorted list)
+        best_model_info = {}
+        if models_performance:
+            best_model = models_performance[0]
+            best_model_info = {
+                "model_type": best_model["model_type"],
+                "overall_metrics": {
+                    "f1_score": best_model["overall_metrics"]["f1"],
+                    "accuracy": best_model["overall_metrics"]["accuracy"],
+                    "precision": best_model["overall_metrics"]["precision"],
+                    "recall": best_model["overall_metrics"]["recall"],
+                    "roc_auc": best_model["overall_metrics"]["roc_auc"],
+                },
+                "class_metrics": best_model["class_metrics"],
+            }
+
+        # Build final payload
+        payload = {
+            "training_summary": training_summary,
+            "models_performance": models_performance,
+            "best_model_info": best_model_info,
+        }
-    # Convert to MB
-    model_size_mb = model_size / (1024 * 1024)
-    return model_size_mb
+        return payload
+    except Exception as e:
+        logger.error(f"Error preprocessing training summary: {str(e)}")
+        raise
-def measure_inference_speed(model, sample_input, device, num_runs=100, warm_up=10):
+def set_random_seeds(seed_val=SEED):
+    random.seed(seed_val)
+    np.random.seed(seed_val)
+    torch.manual_seed(seed_val)
+    torch.cuda.manual_seed_all(seed_val)
+    set_seed(seed_val)
+
+def evaluate(model, dataloader, device, num_labels):
    """
-    Measure the inference speed of a model
+    Evaluate a trained model
on a given dataset and return predictions, true labels, and probabilities. + + This function performs inference on the provided dataloader to compute model predictions, + extract true labels, and calculate class probabilities. It's used for validation during + training and final testing to assess model performance. + + Args: + model (torch.nn.Module): The trained PyTorch model to evaluate. Should be a + transformers AutoModelForSequenceClassification or compatible model. + dataloader (torch.utils.data.DataLoader): DataLoader containing the evaluation dataset. + Expected to yield batches with 'input_ids', 'attention_mask', and 'label' keys. + device (torch.device): The device (CPU or CUDA) where the model and data should be processed. + num_labels (int): Number of classes in the classification task. Used for validation + but not directly in computation. + + Returns: + tuple: A tuple containing three numpy arrays: + - predictions (np.ndarray): Array of predicted class indices with shape (n_samples,). + Contains the argmax of model logits for each sample. + - true_labels (np.ndarray): Array of ground truth class indices with shape (n_samples,). + Contains the actual labels from the dataset. + - all_probs (np.ndarray): Array of class probabilities with shape (n_samples, num_labels). + Contains softmax probabilities for each class for each sample. + + Example: + >>> # During validation + >>> val_preds, val_labels, val_probs = evaluate(model, val_dataloader, device, num_labels) + >>> val_metrics, _ = compute_metrics(val_preds, val_labels, val_probs, class_names) + >>> + >>> # During final testing + >>> test_preds, test_labels, test_probs = evaluate(model, test_dataloader, device, num_labels) + >>> test_metrics, test_cm = compute_metrics(test_preds, test_labels, test_probs, class_names) + + Notes: + - The function sets the model to evaluation mode (model.eval()) and disables gradient + computation (torch.no_grad()) for efficient inference. + - Predictions are computed using argmax of logits, while probabilities use softmax. + - All tensors are moved to CPU and converted to numpy arrays before returning. + - The function processes data in batches to handle large datasets efficiently. + - This function is compatible with Hugging Face transformers models and custom PyTorch models + that return outputs with a 'logits' attribute. + + Raises: + RuntimeError: If the model forward pass fails (e.g., shape mismatches, CUDA errors). + KeyError: If the expected keys ('input_ids', 'attention_mask', 'label') are missing + from dataloader batches. + AttributeError: If the model outputs don't have the expected 'logits' attribute. 
+ + See Also: + - compute_metrics(): Used to calculate evaluation metrics from the returned arrays + - measure_inference_time(): For measuring model inference speed + - train_epoch(): For the training counterpart of this evaluation function """ model.eval() + predictions = [] + true_labels = [] + all_probs = [] - # Move inputs to the appropriate device - input_ids = sample_input["input_ids"].to(device) - attention_mask = sample_input["attention_mask"].to(device) - - # Warm-up runs with torch.no_grad(): - for _ in range(warm_up): - _ = model(input_ids=input_ids, attention_mask=attention_mask) + for batch in dataloader: + input_ids = batch["input_ids"].to(device) + attention_mask = batch["attention_mask"].to(device) + labels = batch["label"].to(device) - # Measure inference time - start_time = time.time() - with torch.no_grad(): - for _ in range(num_runs): - _ = model(input_ids=input_ids, attention_mask=attention_mask) - end_time = time.time() - - avg_time = (end_time - start_time) / num_runs - return avg_time - - -def log_model_metadata(model, tokenizer, config=None, output_dir=None): - """ - Log model metadata to a file and MLflow - """ - if output_dir: - os.makedirs(output_dir, exist_ok=True) - - # Collect model metadata - model_info = { - "model_type": model.__class__.__name__, - "model_parameters": sum(p.numel() for p in model.parameters()), - "trainable_parameters": sum( - p.numel() for p in model.parameters() if p.requires_grad - ), - "tokenizer_type": tokenizer.__class__.__name__, - "vocab_size": len(tokenizer), - "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), - } - - # Add any additional config - if config: - model_info.update(config) + outputs = model(input_ids=input_ids, attention_mask=attention_mask) - # Save metadata to file - if output_dir: - metadata_path = os.path.join(output_dir, "model_metadata.json") - with open(metadata_path, "w") as f: - json.dump(model_info, f, indent=2) + logits = outputs.logits + probs = torch.nn.functional.softmax(logits, dim=-1) - # Log to MLflow - mlflow.log_dict(model_info, "model_metadata.json") + predictions.extend(torch.argmax(logits, dim=-1).cpu().numpy()) + true_labels.extend(labels.cpu().numpy()) + all_probs.extend(probs.cpu().numpy()) - return model_info + return np.array(predictions), np.array(true_labels), np.array(all_probs) -def analyze_dataset(data_path, text_col="text", label_col="label", output_dir=None): +def measure_inference_time(model, dataloader, device, num_runs=100): """ - Analyze a dataset and generate statistics. - + Measure the average inference time per batch for model performance benchmarking. + Args: - data_path (str): Path to the dataset - text_col (str): Name of the column containing the text - label_col (str): Name of the column containing the labels - output_dir (str, optional): Directory to save the analysis results - + model (torch.nn.Module): The trained model to benchmark. + dataloader (torch.utils.data.DataLoader): DataLoader with test batches. + device (torch.device): Device for model computation (CPU/CUDA). + num_runs (int, optional): Number of batches to time. Defaults to 100. + Returns: - dict: Dataset statistics + float: Average inference time per batch in seconds. + + Example: + >>> avg_time = measure_inference_time(model, test_dataloader, device) + >>> mlflow.log_metric("avg_inference_time_seconds", avg_time) + + Note: + Used for production deployment planning and model comparison. 
""" - # Load the data - if data_path.endswith(".csv"): - df = pd.read_csv(data_path) - elif data_path.endswith(".json"): - df = pd.read_json(data_path, lines=True) - else: - raise ValueError(f"Unsupported file format: {data_path}") - - # Basic dataset statistics - stats = { - "num_samples": len(df), - "num_classes": len(df[label_col].unique()), - "class_distribution": df[label_col].value_counts().to_dict(), - "avg_text_length": df[text_col].str.len().mean(), - "min_text_length": df[text_col].str.len().min(), - "max_text_length": df[text_col].str.len().max(), - "median_text_length": df[text_col].str.len().median(), - } - - # Plot class distribution - if output_dir: - os.makedirs(output_dir, exist_ok=True) - - # Class distribution plot - plt.figure(figsize=(10, 6)) - sns.countplot( - y=df[label_col].astype(str), - order=df[label_col].value_counts().index.astype(str), - ) - plt.title("Class Distribution") - plt.xlabel("Count") - plt.ylabel("Class") - plt.tight_layout() - - class_dist_path = os.path.join(output_dir, "class_distribution.png") - plt.savefig(class_dist_path) - plt.close() - - # Text length distribution - plt.figure(figsize=(10, 6)) - sns.histplot(df[text_col].str.len(), bins=50) - plt.title("Text Length Distribution") - plt.xlabel("Text Length (characters)") - plt.ylabel("Count") - plt.tight_layout() - - text_len_path = os.path.join(output_dir, "text_length_distribution.png") - plt.savefig(text_len_path) - plt.close() - - # Save statistics to a JSON file - stats_path = os.path.join(output_dir, "dataset_stats.json") - with open(stats_path, "w") as f: - # Convert any non-serializable values (like numpy.int64) to standard Python types - stats_serializable = { - k: v if isinstance(v, (str, int, float, bool, list, dict)) else int(v) - for k, v in stats.items() - } - json.dump(stats_serializable, f, indent=2) - - # Log to MLflow if active - try: - mlflow.log_artifact(class_dist_path) - mlflow.log_artifact(text_len_path) - mlflow.log_dict(stats, "dataset_stats.json") - except: - # MLflow might not be active - pass - - return stats - + model.eval() + times = [] -def format_time(seconds): - """ - Format time in seconds to a human-readable string. 
+ with torch.no_grad(): + for i, batch in enumerate(dataloader): + if i >= num_runs: + break - Args: - seconds (float): Time in seconds + input_ids = batch["input_ids"].to(device) + attention_mask = batch["attention_mask"].to(device) - Returns: - str: Formatted time string - """ - if seconds < 1e-3: # Less than a millisecond - return f"{seconds * 1e6:.2f} µs" - elif seconds < 1: # Less than a second - return f"{seconds * 1e3:.2f} ms" - elif seconds < 60: # Less than a minute - return f"{seconds:.2f} s" - elif seconds < 3600: # Less than an hour - minutes = int(seconds // 60) - secs = seconds % 60 - return f"{minutes}m {secs:.2f}s" - else: # Hours or more - hours = int(seconds // 3600) - minutes = int((seconds % 3600) // 60) - secs = seconds % 60 - return f"{hours}h {minutes}m {secs:.2f}s" - - -def compare_models(metrics_list, model_names=None, output_dir=None): - """ - Compare multiple models based on their metrics - """ - if model_names is None: - model_names = [f"Model {i+1}" for i in range(len(metrics_list))] + start_time = time.time() + _ = model(input_ids=input_ids, attention_mask=attention_mask) + end_time = time.time() - if len(model_names) != len(metrics_list): - raise ValueError( - "Number of model names must match number of metric dictionaries" - ) + times.append(end_time - start_time) - # Extract common metrics for comparison - common_metrics = [ - "accuracy", - "precision", - "recall", - "f1", - "roc_auc", - "pr_auc", - "fpr@95tpr", - "avg_inference_time_seconds", - "training_time_seconds", - ] - - # Create comparison dictionary - comparison = {name: {} for name in model_names} - - for i, (name, metrics) in enumerate(zip(model_names, metrics_list)): - for metric in common_metrics: - if metric in metrics: - comparison[name][metric] = metrics[metric] - # Handle nested metrics - elif "test_metrics" in metrics and metric in metrics["test_metrics"]: - comparison[name][metric] = metrics["test_metrics"][metric] - - # Convert to DataFrame for easier comparison - comp_df = pd.DataFrame(comparison).T - - # Format time metrics if present - time_cols = [col for col in comp_df.columns if "time" in col] - for col in time_cols: - if col in comp_df.columns: - comp_df[f"{col}_formatted"] = comp_df[col].apply(format_time) - - # Save comparison to CSV and JSON - if output_dir: - os.makedirs(output_dir, exist_ok=True) - - # Save as CSV - csv_path = os.path.join(output_dir, "model_comparison.csv") - comp_df.to_csv(csv_path) - - # Save as JSON - json_path = os.path.join(output_dir, "model_comparison.json") - comp_df.to_json(json_path, orient="index", indent=2) - - # Create comparison plots - for metric in common_metrics: - if metric in comp_df.columns: - plt.figure(figsize=(10, 6)) - - # Skip time metrics for bar plots (they're often on different scales) - if "time" in metric: - continue - - # Create bar plot - ax = sns.barplot(x=comp_df.index, y=comp_df[metric]) - plt.title(f"Comparison of {metric}") - plt.ylabel(metric) - plt.xlabel("Model") - - # Add value labels on top of each bar - for i, v in enumerate(comp_df[metric]): - ax.text(i, v, f"{v:.4f}", ha="center", va="bottom") - - plt.xticks(rotation=45) - plt.tight_layout() - - # Save the plot - plot_path = os.path.join(output_dir, f"compare_{metric}.png") - plt.savefig(plot_path) - plt.close() - - # Log to MLflow if active - try: - mlflow.log_artifact(plot_path) - except: - # MLflow might not be active - pass - - # Log to MLflow if active - try: - mlflow.log_artifact(csv_path) - mlflow.log_artifact(json_path) - except: - # MLflow might not be 
active - pass - - return comp_df + return np.mean(times) diff --git a/src/training/training_entrypoint.sh b/src/training/training_entrypoint.sh new file mode 100644 index 00000000..2be780ee --- /dev/null +++ b/src/training/training_entrypoint.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +echo "[$(date '+%Y-%m-%d %H:%M:%S')] 🚀 [ENTRYPOINT] Starting container setup..." + +REQUIREMENTS_FILE="/app/requirements.txt" + +if [ ! -f "$REQUIREMENTS_FILE" ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ requirements.txt not found at $REQUIREMENTS_FILE" + exit 1 +fi + +echo "[$(date '+%Y-%m-%d %H:%M:%S')] ⚡ Installing uv (ultra-fast installer)..." +python3 -m pip install --no-cache-dir uv +if [ $? -ne 0 ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Failed to install uv" + exit 1 +fi + +# Verify uv is accessible +if ! command -v uv >/dev/null 2>&1; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ uv command not found in PATH, falling back to pip" + echo "[$(date '+%Y-%m-%d %H:%M:%S')] 📦 Installing requirements with pip..." + python3 -m pip install --no-cache-dir -r "$REQUIREMENTS_FILE" + if [ $? -ne 0 ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Failed to install requirements with pip" + exit 1 + fi + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ All requirements installed with pip." +else + echo "[$(date '+%Y-%m-%d %H:%M:%S')] 📦 Installing requirements with uv..." + uv pip install --no-cache --requirement "$REQUIREMENTS_FILE" + if [ $? -ne 0 ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Failed to install requirements with uv, falling back to pip..." + python3 -m pip install --no-cache-dir -r "$REQUIREMENTS_FILE" + if [ $? -ne 0 ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Failed to install requirements with pip" + exit 1 + fi + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ All requirements installed with pip (fallback)." + else + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ All requirements installed with uv." + fi +fi + +echo "[$(date '+%Y-%m-%d %H:%M:%S')] 🚀 Executing: $@" +exec "$@" From 970eb89be480bdd0a90261c321e3c7959f77a04f Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Fri, 11 Jul 2025 10:03:21 +0530 Subject: [PATCH 084/195] fixed ruff format issues --- src/training/scripts/train.py | 30 ++++++++++++++++-------------- src/training/scripts/utils.py | 30 ++++++++++++++++-------------- 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/src/training/scripts/train.py b/src/training/scripts/train.py index 27f5a077..8e339b61 100644 --- a/src/training/scripts/train.py +++ b/src/training/scripts/train.py @@ -12,7 +12,7 @@ from transformers import ( AutoTokenizer, AutoModelForSequenceClassification, - get_linear_schedule_with_warmup + get_linear_schedule_with_warmup, ) from scripts.utils import ( @@ -22,7 +22,7 @@ set_random_seeds, measure_inference_time, evaluate, - update_job_status + update_job_status, ) import mlflow @@ -41,7 +41,7 @@ PROCESSED_DATASET_DIR, TEST_SIZE, VALIDATION_SIZE, - RANDOM_STATE + RANDOM_STATE, ) from transformers.onnx import export from transformers.onnx.features import FeaturesManager @@ -138,6 +138,7 @@ def convert_model_to_onnx(model_dir: str): logger.error(f"ONNX export failed: {e}") return None + def validate_processed_dataset(processed_dataset_dir: str) -> Dict[str, Any]: """ Validate that the processed dataset has all required files. @@ -291,17 +292,17 @@ def load_data_from_dataset_folder( def train_epoch(model, dataloader, optimizer, scheduler, device): """ Execute one training epoch with forward pass, backpropagation, and optimization. 
- + Args: model (torch.nn.Module): The model to train. dataloader (DataLoader): Training data batches. optimizer (torch.optim.Optimizer): Optimizer for parameter updates. scheduler: Learning rate scheduler. device (torch.device): Device for computation (CPU/CUDA). - + Returns: float: Average training loss for the epoch. - + Note: Includes gradient clipping (max_norm=1.0) for training stability. """ @@ -332,6 +333,7 @@ def train_epoch(model, dataloader, optimizer, scheduler, device): return total_loss / len(dataloader) + def train_single_model( model_type: str, train_texts: List[str], @@ -348,7 +350,7 @@ def train_single_model( ) -> Dict[str, Any]: """ Train a single transformer model for text classification with MLflow tracking. - + Args: model_type (str): Model architecture (e.g., "bert", "roberta"). train_texts, train_labels: Training data and labels. @@ -359,10 +361,10 @@ def train_single_model( class_names (List[str]): Class names for logging. output_dir (str): Directory to save trained model. args: Training hyperparameters and configuration. - + Returns: Dict[str, Any]: Training results with status, metrics, and model paths. - + Note: Uses early stopping, MLflow tracking, and saves best model with ONNX export. """ @@ -549,10 +551,10 @@ def train_single_model( def train_multiple_models(args): """ Train multiple transformer models and select the best performing one. - + Orchestrates multi-model training pipeline with data loading, model training, evaluation, and best model processing including ONNX export and S3 upload. - + Args: args (argparse.Namespace): Command line arguments containing: - model_types (str): JSON string of model types (e.g., '["bert", "roberta"]') @@ -561,7 +563,7 @@ def train_multiple_models(args): - output_dir (str): Directory to save outputs - model_id, job_id (int): Unique identifiers - Training hyperparameters (num_epochs, batch_size, learning_rate, etc.) - + Returns: Dict[str, Any]: Preprocessed training results containing: - training_summary: Overall statistics and best model info @@ -569,12 +571,12 @@ def train_multiple_models(args): - model_comparison: Performance comparison across models - best_model_s3_path: S3 path of uploaded best model (if successful) - best_model_onnx_path: ONNX model path (if successful) - + Example: >>> args = argparse.Namespace(model_types='["bert", "roberta"]', ...) >>> results = train_multiple_models(args) >>> print(f"Best model: {results['training_summary']['best_overall_model']}") - + Note: - Downloads data from S3, trains models sequentially with MLflow tracking - Uses early stopping, exports best model to ONNX, uploads to S3 diff --git a/src/training/scripts/utils.py b/src/training/scripts/utils.py index 13a8f4c2..63c969fb 100644 --- a/src/training/scripts/utils.py +++ b/src/training/scripts/utils.py @@ -15,7 +15,7 @@ from sklearn.metrics import ( accuracy_score, precision_recall_fscore_support, - roc_auc_score + roc_auc_score, ) from scripts.constants import ( LOG_DIRECTORY, @@ -293,6 +293,7 @@ def preprocess_training_summary_from_dict( logger.error(f"Error preprocessing training summary: {str(e)}") raise + def set_random_seeds(seed_val=SEED): random.seed(seed_val) np.random.seed(seed_val) @@ -300,23 +301,24 @@ def set_random_seeds(seed_val=SEED): torch.cuda.manual_seed_all(seed_val) set_seed(seed_val) + def evaluate(model, dataloader, device, num_labels): """ Evaluate a trained model on a given dataset and return predictions, true labels, and probabilities. 
- + This function performs inference on the provided dataloader to compute model predictions, extract true labels, and calculate class probabilities. It's used for validation during training and final testing to assess model performance. - + Args: - model (torch.nn.Module): The trained PyTorch model to evaluate. Should be a + model (torch.nn.Module): The trained PyTorch model to evaluate. Should be a transformers AutoModelForSequenceClassification or compatible model. dataloader (torch.utils.data.DataLoader): DataLoader containing the evaluation dataset. Expected to yield batches with 'input_ids', 'attention_mask', and 'label' keys. device (torch.device): The device (CPU or CUDA) where the model and data should be processed. num_labels (int): Number of classes in the classification task. Used for validation but not directly in computation. - + Returns: tuple: A tuple containing three numpy arrays: - predictions (np.ndarray): Array of predicted class indices with shape (n_samples,). @@ -325,16 +327,16 @@ def evaluate(model, dataloader, device, num_labels): Contains the actual labels from the dataset. - all_probs (np.ndarray): Array of class probabilities with shape (n_samples, num_labels). Contains softmax probabilities for each class for each sample. - + Example: >>> # During validation >>> val_preds, val_labels, val_probs = evaluate(model, val_dataloader, device, num_labels) >>> val_metrics, _ = compute_metrics(val_preds, val_labels, val_probs, class_names) - >>> + >>> >>> # During final testing >>> test_preds, test_labels, test_probs = evaluate(model, test_dataloader, device, num_labels) >>> test_metrics, test_cm = compute_metrics(test_preds, test_labels, test_probs, class_names) - + Notes: - The function sets the model to evaluation mode (model.eval()) and disables gradient computation (torch.no_grad()) for efficient inference. @@ -343,13 +345,13 @@ def evaluate(model, dataloader, device, num_labels): - The function processes data in batches to handle large datasets efficiently. - This function is compatible with Hugging Face transformers models and custom PyTorch models that return outputs with a 'logits' attribute. - + Raises: RuntimeError: If the model forward pass fails (e.g., shape mismatches, CUDA errors). KeyError: If the expected keys ('input_ids', 'attention_mask', 'label') are missing from dataloader batches. AttributeError: If the model outputs don't have the expected 'logits' attribute. - + See Also: - compute_metrics(): Used to calculate evaluation metrics from the returned arrays - measure_inference_time(): For measuring model inference speed @@ -381,20 +383,20 @@ def evaluate(model, dataloader, device, num_labels): def measure_inference_time(model, dataloader, device, num_runs=100): """ Measure the average inference time per batch for model performance benchmarking. - + Args: model (torch.nn.Module): The trained model to benchmark. dataloader (torch.utils.data.DataLoader): DataLoader with test batches. device (torch.device): Device for model computation (CPU/CUDA). num_runs (int, optional): Number of batches to time. Defaults to 100. - + Returns: float: Average inference time per batch in seconds. - + Example: >>> avg_time = measure_inference_time(model, test_dataloader, device) >>> mlflow.log_metric("avg_inference_time_seconds", avg_time) - + Note: Used for production deployment planning and model comparison. 
""" From b8015f1e0a8b65fa0002e2ab3745c80cbbbfc7c1 Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Fri, 11 Jul 2025 10:09:33 +0530 Subject: [PATCH 085/195] fixed issue --- DSL/CronManager/DSL/data_resync.yml | 4 ++-- DSL/CronManager/DSL/data_sync.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/DSL/CronManager/DSL/data_resync.yml b/DSL/CronManager/DSL/data_resync.yml index 7ba86d3b..dc9c53cc 100644 --- a/DSL/CronManager/DSL/data_resync.yml +++ b/DSL/CronManager/DSL/data_resync.yml @@ -1,5 +1,5 @@ agency_data_resync: - # trigger: "0 0/1 * * * ?" - trigger: off + trigger: "0 0/1 * * * ?" + # trigger: off type: exec command: "../app/scripts/agency_data_resync.sh -s 10" diff --git a/DSL/CronManager/DSL/data_sync.yml b/DSL/CronManager/DSL/data_sync.yml index 32b8cfa0..fe85ca33 100644 --- a/DSL/CronManager/DSL/data_sync.yml +++ b/DSL/CronManager/DSL/data_sync.yml @@ -1,5 +1,5 @@ agency_data_sync: - # trigger: "0 0/1 * * * ?" - trigger: off + trigger: "0 0/1 * * * ?" + # trigger: off type: exec command: "../app/scripts/agency_data_sync.sh -s 10" From e901cf1464391fe416964d3f765785463b72a87f Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Fri, 11 Jul 2025 16:21:36 +0530 Subject: [PATCH 086/195] feat: Enhance data model management with new SQL scripts and update training logic --- .../hbs/format_connected_models.handlebars | 5 ++ ...lobal-classifier-script-v9-data-models.sql | 4 +- .../POST/get-datasets-connected-models.sql | 10 +++ .../update-datamodels-training-status.sql | 50 ++++++++++++ .../GET/datasets/metadata.yml | 44 ++++++++-- .../POST/datamodels/create.yml | 61 +++++++------- .../POST/datamodels/major.yml | 38 +++++++-- .../POST/datamodels/minor.yml | 37 +++++++-- .../POST/datamodels/update-training.yml | 80 +++++++++++++++++++ .../FormElements/FormSelect/index.tsx | 2 +- .../pages/DataModels/ConfigureDataModel.tsx | 53 ++++++------ GUI/src/pages/ViewDataset/index.tsx | 5 +- GUI/src/services/datasets.ts | 2 +- GUI/src/utils/commonUtilts.ts | 5 +- 14 files changed, 313 insertions(+), 83 deletions(-) create mode 100644 DSL/DMapper/global-classifier/hbs/format_connected_models.handlebars create mode 100644 DSL/Resql/global-classifier/POST/get-datasets-connected-models.sql create mode 100644 DSL/Resql/global-classifier/POST/update-datamodels-training-status.sql create mode 100644 DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml diff --git a/DSL/DMapper/global-classifier/hbs/format_connected_models.handlebars b/DSL/DMapper/global-classifier/hbs/format_connected_models.handlebars new file mode 100644 index 00000000..ec47e7eb --- /dev/null +++ b/DSL/DMapper/global-classifier/hbs/format_connected_models.handlebars @@ -0,0 +1,5 @@ +[ + {{#each connectedModels}} + "{{modelName}} V{{major}}.{{minor}}"{{#unless @last}},{{/unless}} + {{/each}} +] \ No newline at end of file diff --git a/DSL/Liquibase/changelog/global-classifier-script-v9-data-models.sql b/DSL/Liquibase/changelog/global-classifier-script-v9-data-models.sql index fd983325..2f9cf12e 100644 --- a/DSL/Liquibase/changelog/global-classifier-script-v9-data-models.sql +++ b/DSL/Liquibase/changelog/global-classifier-script-v9-data-models.sql @@ -10,7 +10,7 @@ CREATE TYPE training_status AS ENUM ('retraining_needed', 'trained', 'training_i -- Create deployment environment enum CREATE TYPE deployment_environment AS ENUM ('undeployed', 'testing', 'production'); -CREATE TYPE base_models AS ENUM ('distil-bert', 'roberta', 'bert'); +CREATE TYPE base_models AS ENUM ('bert', 'roberta', 'xlm'); -- 
changeset erangi:global-classifier-models-metadata-table CREATE TABLE public.data_models ( @@ -45,6 +45,6 @@ CREATE TABLE model_configurations ( -- changeset erangi:global-classifier-model_configurations-add-data INSERT INTO model_configurations (base_models, deployment_environments) VALUES ( - '["distil-bert", "roberta", "bert"]'::JSONB, + '["bert", "roberta", "xlm"]'::JSONB, ARRAY['undeployed', 'testing', 'production']::deployment_environment[] ); \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/get-datasets-connected-models.sql b/DSL/Resql/global-classifier/POST/get-datasets-connected-models.sql new file mode 100644 index 00000000..5fda0eb2 --- /dev/null +++ b/DSL/Resql/global-classifier/POST/get-datasets-connected-models.sql @@ -0,0 +1,10 @@ +SELECT + dm.model_name, + dm.major, + dm.minor +FROM public.data_models dm +WHERE dm.model_id = ANY( + SELECT jsonb_array_elements_text(connected_models)::BIGINT + FROM public.datasets + WHERE id = :datasetId +); \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/update-datamodels-training-status.sql b/DSL/Resql/global-classifier/POST/update-datamodels-training-status.sql new file mode 100644 index 00000000..2f9cf12e --- /dev/null +++ b/DSL/Resql/global-classifier/POST/update-datamodels-training-status.sql @@ -0,0 +1,17 @@ +UPDATE public.data_models +SET + training_status = 'trained'::training_status, + training_results = :trainingResults::jsonb, + last_trained = CURRENT_TIMESTAMP, + updated_timestamp = CURRENT_TIMESTAMP +WHERE + model_id = :modelId +RETURNING + model_id, + model_group_key, + model_name, + major, + minor, + training_status, + training_results, + updated_timestamp; \ No newline at end of file
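For reference, the connected-models lookup in get-datasets-connected-models.sql above hinges on PostgreSQL's jsonb_array_elements_text() expanding the JSONB array of model ids into rows. A minimal illustrative check (the ids are example values, not taken from this changeset):

-- Expands a JSONB id array into one BIGINT per row (illustrative values only):
SELECT jsonb_array_elements_text('["3", "7"]'::jsonb)::BIGINT AS model_id;
-- returns two rows, 3 and 7, which the outer query then matches via dm.model_id = ANY(...)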
diff --git a/DSL/Ruuter.private/global-classifier/GET/datasets/metadata.yml b/DSL/Ruuter.private/global-classifier/GET/datasets/metadata.yml index 33c78559..f77bab78 100644 --- a/DSL/Ruuter.private/global-classifier/GET/datasets/metadata.yml +++ b/DSL/Ruuter.private/global-classifier/GET/datasets/metadata.yml @@ -1,7 +1,7 @@ declaration: call: declare version: 0.1 - description: "Get dataset metadata by id" + description: "Get dataset metadata by id with connected models" method: get accepts: json returns: json @@ -15,6 +15,7 @@ declaration: extractRequestData: assign: datasetId: ${Number(incoming.params.datasetId)} + next: getDatasetMetadata getDatasetMetadata: call: http.post @@ -23,20 +24,51 @@ getDatasetMetadata: body: id: ${datasetId} result: dataset_res - next: check_result + next: checkDatasetResult -check_result: +checkDatasetResult: switch: - - condition: ${dataset_res.response.body.length === 0} + - condition: ${!dataset_res || !dataset_res.response.body || dataset_res.response.body.length === 0} next: return_not_found + next: getConnectedModels + +getConnectedModels: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/get-datasets-connected-models" + body: + datasetId: ${datasetId} + result: connected_models_res + next: formatConnectedModels + +formatConnectedModels: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_DMAPPER]/hbs/global-classifier/format_connected_models" + headers: + type: json + body: + connectedModels: ${connected_models_res.response.body} + result: formatted_models_res + next: buildFinalResponse + +buildFinalResponse: + assign: + dataset: ${dataset_res.response.body[0]} + finalResponse: + response: + - id: ${dataset.id} + major: ${dataset.major} + minor: ${dataset.minor} + connectedModels: ${formatted_models_res.response.body} next: return_result return_not_found: - return: "Dataset not found" + return: '{"error": "Dataset not found"}' status: 404 next: end return_result: - return: ${dataset_res.response.body} + return: ${finalResponse} status: 200 next: end \ No newline at end of file diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml index e10db9c6..facd9926 100644 --- a/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml @@ -1,7 +1,7 @@ declaration: call: declare version: 0.1 - description: "Create a new data model record in data_models and update connected dataset" + description: "Create a new data model record in data_models, update connected dataset and initiate training" method: post accepts: json returns: json @@ -17,7 +17,7 @@ declaration: description: "Deployment environment: production, testing, undeployed (required)" - field: baseModels type: array - description: "Array of base models: ['distil-bert', 'bert', 'roberta'] (required)" + description: "Array of base models: ['bert', 'roberta', 'xlm'] (required)" - field: connectedDsId type: number description: "Connected dataset ID (required)" @@ -56,30 +56,8 @@ validateEnumValues: switch: - condition: ${!["production", "testing", "undeployed"].includes(deploymentEnv)} next: return_invalid_deployment_env - - condition: ${!baseModels.every(model => ["distil-bert", "roberta", "bert"].includes(model))} + - condition: ${!baseModels.every(model => ["bert", "roberta", "xlm"].includes(model))} next: return_invalid_base_model - next: checkProductionDeployment - -# Check if Production Deployment -checkProductionDeployment: - switch: - - condition: ${deploymentEnv === "production"} - next: updateExistingProductionModels - next: insertModelMetadata - -# Update Existing Production Models to Undeployed -updateExistingProductionModels: - call: http.post - args: - url:
"[#GLOBAL_CLASSIFIER_RESQL]/update-datamodel-deployment-env" - result: update_production_res - next: checkProductionUpdateResult - -# Check Production Update Result -checkProductionUpdateResult: - switch: - - condition: ${!update_production_res || !update_production_res.response || !update_production_res.response.body} - next: return_production_update_failed next: insertModelMetadata # Insert Model Metadata @@ -120,10 +98,29 @@ checkUpdateResult: switch: - condition: ${!update_dataset_res || !update_dataset_res.response.body || update_dataset_res.response.body.length === 0} next: return_update_failed + next: initiateTraining + +# Initiate Training +initiateTraining: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RUUTER_PRIVATE]/datamodels/train" + body: + modelId: ${insert_model_res.response.body[0].modelId} + result: training_res + next: checkTrainingResult + +# Check Training Result +checkTrainingResult: + switch: + - condition: ${!training_res || !training_res.response} + next: return_training_failed + - condition: ${training_res.response.body[0].operationSuccessful !== true} + next: return_training_failed next: return_success return_success: - return: "Data model created successfully" + return: "Data model created successfully and training initiated. Model ID:${insert_model_res.response.body[0].modelId} Job ID: ${training_res.response.body[0].jobId}" status: 200 next: end @@ -138,15 +135,10 @@ return_invalid_deployment_env: next: end return_invalid_base_model: - return: "error: invalid baseModels. Each model must be one of: distil-bert, bert, roberta" + return: "error: invalid baseModels. Each model must be one of: bert, roberta, xlm" status: 400 next: end -return_production_update_failed: - return: "error: failed to update existing production models to undeployed before creating new production model" - status: 500 - next: end - return_insert_failed: return: "error: failed to create model metadata record" status: 500 @@ -155,4 +147,9 @@ return_insert_failed: return_update_failed: return: "error: model created but failed to update dataset connected_models" status: 500 + next: end + +return_training_failed: + return: "error: model created and dataset updated but failed to initiate training" + status: 500 next: end \ No newline at end of file diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml index 5c788d8e..e3c955a0 100644 --- a/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml @@ -19,7 +19,7 @@ declaration: description: "Deployment environment: production, testing, undeployed (required)" - field: baseModels type: array - description: "Array of base models: ['distil-bert', 'bert', 'roberta'] (required)" + description: "Array of base models: ['bert', 'roberta', 'xlm'] (required)" - field: connectedDsId type: number description: "Connected dataset ID (required)" @@ -59,7 +59,7 @@ validateEnumValues: switch: - condition: ${!["production", "testing", "undeployed"].includes(deploymentEnv)} next: return_invalid_deployment_env - - condition: ${!baseModels.every(model => ["distil-bert", "roberta", "bert"].includes(model))} + - condition: ${!baseModels.every(model => ["bert", "roberta", "xlm"].includes(model))} next: return_invalid_base_model next: checkProductionDeployment @@ -170,14 +170,33 @@ checkDatasetUpdateResult: switch: - condition: ${!updateDatasetResult || !updateDatasetResult.response || 
!updateDatasetResult.response.body || updateDatasetResult.response.body.length === 0} next: return_dataset_update_failed - next: returnResult + next: initiateTraining -# Return Result -returnResult: - return: "Data model major version updated successfully" +# Initiate Training +initiateTraining: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RUUTER_PRIVATE]/datamodels/train" + body: + modelId: ${insertResult.response.body[0].modelId} + result: training_res + next: checkTrainingResult + +# Check Training Result +checkTrainingResult: + switch: + - condition: ${!training_res || !training_res.response} + next: return_training_failed + - condition: ${training_res.response.body[0].operationSuccessful !== true} + next: return_training_failed + next: return_success + +return_success: + return: "Data model created successfully and training initiated. Model ID:${insertResult.response.body[0].modelId} Job ID: ${training_res.response.body[0].jobId}" status: 200 next: end + return_missing_required_fields: return: "error: missing required fields (modelGroupKey, modelName, deploymentEnv, baseModels as array, connectedDsId, connectedDsMajorVersion, connectedDsMinorVersion)" status: 400 @@ -189,7 +208,7 @@ return_invalid_deployment_env: next: end return_invalid_base_model: - return: "error: invalid baseModels. Each model must be one of: distil-bert, bert, roberta" + return: "error: invalid baseModels. Each model must be one of: bert, roberta, xlm" status: 400 next: end @@ -206,4 +225,9 @@ return_insert_failed: return_dataset_update_failed: return: "error: model created but failed to update dataset connected_models" status: 500 + next: end + +return_training_failed: + return: "error: model created and dataset updated but failed to initiate training" + status: 500 next: end \ No newline at end of file diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml index 41512b33..ce221465 100644 --- a/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml @@ -19,7 +19,7 @@ declaration: description: "Deployment environment: production, testing, undeployed (required)" - field: baseModels type: array - description: "Array of base models: ['distil-bert', 'bert', 'roberta'] (required)" + description: "Array of base models: ['bert', 'roberta', 'xlm'] (required)" - field: connectedDsId type: number description: "Connected dataset ID (required)" @@ -59,7 +59,7 @@ validateEnumValues: switch: - condition: ${!["production", "testing", "undeployed"].includes(deploymentEnv)} next: return_invalid_deployment_env - - condition: ${!baseModels.every(model => ["distil-bert", "roberta", "bert"].includes(model))} + - condition: ${!baseModels.every(model => ["bert", "roberta", "xlm"].includes(model))} next: return_invalid_base_model next: checkProductionDeployment @@ -162,11 +162,29 @@ checkDatasetUpdateResult: switch: - condition: ${!updateDatasetResult || !updateDatasetResult.response || !updateDatasetResult.response.body || updateDatasetResult.response.body.length === 0} next: return_dataset_update_failed - next: returnResult + next: initiateTraining -# Return Result -returnResult: - return: "Data model minor version updated successfully" +# Initiate Training +initiateTraining: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RUUTER_PRIVATE]/datamodels/train" + body: + modelId: ${insertResult.response.body[0].modelId} + result: training_res + next: 
checkTrainingResult + +# Check Training Result +checkTrainingResult: + switch: + - condition: ${!training_res || !training_res.response} + next: return_training_failed + - condition: ${training_res.response.body[0].operationSuccessful !== true} + next: return_training_failed + next: return_success + +return_success: + return: "Data model created successfully and training initiated. Model ID:${insertResult.response.body[0].modelId} Job ID: ${training_res.response.body[0].jobId}" status: 200 next: end @@ -181,7 +199,7 @@ return_invalid_deployment_env: next: end return_invalid_base_model: - return: "error: invalid baseModels. Each model must be one of: distil-bert, bert, roberta" + return: "error: invalid baseModels. Each model must be one of: bert, roberta, xlm" status: 400 next: end @@ -198,4 +216,9 @@ return_insert_failed: return_dataset_update_failed: return: "error: model created but failed to update dataset connected_models" status: 500 + next: end + +return_training_failed: + return: "error: model created and dataset updated but failed to initiate training" + status: 500 next: end \ No newline at end of file diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml new file mode 100644 index 00000000..e1c597ab --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml @@ -0,0 +1,80 @@ +declaration: + call: declare + version: 0.1 + description: "Update data model training status to trained and set training results" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + body: + - field: modelId + type: number + description: "Model ID to update (required)" + - field: trainingResults + type: object + description: "Training results object (required)" + next: extractRequestData + +# Data Extraction +extractRequestData: + assign: + modelId: ${incoming.body.modelId} + trainingResults: ${incoming.body.trainingResults} + next: validateRequiredFields + +# Required Field Validation +validateRequiredFields: + switch: + - condition: ${modelId === null || modelId === undefined || !modelId} + next: return_missing_model_id + - condition: ${trainingResults === null || trainingResults === undefined} + next: return_missing_training_results + next: updateTrainingStatus + +# Update Training Status +updateTrainingStatus: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/update-datamodels-training-status" + body: + modelId: ${modelId} + trainingResults: ${JSON.stringify(trainingResults)} + result: update_result + next: checkUpdateResult + +# Check Update Result +checkUpdateResult: + switch: + - condition: ${!update_result || !update_result.response || !update_result.response.body} + next: return_update_failed + - condition: ${update_result.response.body.length === 0} + next: return_model_not_found + next: return_success + +# Success Response +return_success: + return: "Training status updated successfully model ID : ${update_result.response.body[0].modelId}" + status: 200 + next: end + +# Error Responses +return_missing_model_id: + return: '{"error": "Missing required field: modelId"}' + status: 400 + next: end + +return_missing_training_results: + return: '{"error": "Missing required field: trainingResults"}' + status: 400 + next: end + +return_model_not_found: + return: '{"error": "Model not found with the provided modelId"}' + status: 404 + next: end + +return_update_failed: + return: '{"error": "Failed to update training 
status"}' + status: 500 + next: end \ No newline at end of file diff --git a/GUI/src/components/FormElements/FormSelect/index.tsx b/GUI/src/components/FormElements/FormSelect/index.tsx index 37bb89cd..e1187a49 100644 --- a/GUI/src/components/FormElements/FormSelect/index.tsx +++ b/GUI/src/components/FormElements/FormSelect/index.tsx @@ -95,7 +95,7 @@ const FormSelect = forwardRef( const selectClasses = clsx('select', disabled && 'select--disabled'); const placeholderValue = - placeholder || t('datasetGroups.createDataset.selectPlaceholder'); + placeholder || t('global.select'); return (
    diff --git a/GUI/src/pages/DataModels/ConfigureDataModel.tsx b/GUI/src/pages/DataModels/ConfigureDataModel.tsx index d19eb4d1..0735bd37 100644 --- a/GUI/src/pages/DataModels/ConfigureDataModel.tsx +++ b/GUI/src/pages/DataModels/ConfigureDataModel.tsx @@ -17,6 +17,7 @@ import './DataModels.scss'; import { configureDataModel, getDataModelMetadata } from 'services/datamodels'; import { use } from 'i18next'; import { set } from 'date-fns'; +import { areArraysEqual } from 'utils/commonUtilts'; const ConfigureDataModel: FC = () => { const { t } = useTranslation(); @@ -39,7 +40,7 @@ const ConfigureDataModel: FC = () => { const [initialData, setInitialData] = useState>({ modelName: modelMetadata?.modelName, datasetId: modelMetadata?.connectedDsId, - baseModels:modelMetadata?.baseModels, + baseModels: modelMetadata?.baseModels, deploymentEnvironment: modelMetadata?.deploymentEnv, version: `V${modelMetadata?.major}.${modelMetadata?.minor}`, }); @@ -54,7 +55,7 @@ const ConfigureDataModel: FC = () => { }); useEffect(() => { - setInitialData({ + setInitialData({ modelId: modelMetadata?.modelId, modelName: modelMetadata?.modelName, datasetId: modelMetadata?.connectedDsId.toString(), @@ -83,23 +84,23 @@ const ConfigureDataModel: FC = () => { })); }; - const mutation = useMutation({ - mutationFn: configureDataModel, - onSuccess: () => { - open({ - title: t('dataModels.configureDataModel.saveChangesTitile'), - content: t('dataModels.configureDataModel.saveChangesDesc'), - footer: (
    ) - }); - - }, - onError: () => { - open({ - title: t('dataModels.configureDataModel.updateErrorTitile'), - content: t('dataModels.configureDataModel.updateErrorDesc'), - }); - }, - }); + const updateMutation = useMutation({ + mutationFn: configureDataModel, + onSuccess: () => { + open({ + title: t('dataModels.configureDataModel.saveChangesTitile'), + content: t('dataModels.configureDataModel.saveChangesDesc'), + footer: (
    ) + }); + + }, + onError: () => { + open({ + title: t('dataModels.configureDataModel.updateErrorTitile'), + content: t('dataModels.configureDataModel.updateErrorDesc'), + }); + }, + }); const handleSaveChanges = () => { const payload = getChangedAttributes(initialData, dataModel); @@ -121,7 +122,7 @@ const ConfigureDataModel: FC = () => { updateType: updateType ?? "", }; - mutation.mutate(updatedPayload); + updateMutation.mutate(updatedPayload); }; @@ -142,6 +143,7 @@ const ConfigureDataModel: FC = () => { setModalTitle(title); modalFunciton.current = onConfirm; }; + return (
    @@ -194,15 +196,16 @@ const ConfigureDataModel: FC = () => { {t('dataModels.configureDataModel.deleteModal')} +
    + ), + }); + }, + onError: () => { + open({ + title: t('dataModels.configureDataModel.deleteModalErrorTitle'), + content: t('dataModels.configureDataModel.deleteModalErrorDesc'), + }); + }, + }); + const handleDelete = () => { + if (dataModel.deploymentEnvironment === Maturity.PRODUCTION) { + openModal( + t('dataModels.configureDataModel.deleteErrorDesc'), + t('dataModels.configureDataModel.deleteErrorTitle'), + () => navigate('/data-models'), + 'warning' + ); + } else { + openModal( + t('dataModels.configureDataModel.deleteConfirmationDesc'), + t('dataModels.configureDataModel.deleteConfirmation'), + () => deleteDataModelMutation.mutate(modelId), + 'delete' + ); + } }; @@ -143,7 +194,7 @@ const ConfigureDataModel: FC = () => { setModalTitle(title); modalFunciton.current = onConfirm; }; - + return (
    @@ -156,7 +207,7 @@ const ConfigureDataModel: FC = () => {
    - {/* + {modelMetadata?.modelStatus === "deprecated" && (
    @@ -165,13 +216,13 @@ const ConfigureDataModel: FC = () => {
    - */} - + )} {false ? ( ) : ( @@ -189,15 +240,15 @@ const ConfigureDataModel: FC = () => { > ) : modalType === 'delete' ? ( - ) : ( - - )} + ) + : modalType === 'warning' ? ( + + ) + : ( + null + )}
    } > diff --git a/GUI/src/pages/DataModels/CreateDataModel.tsx b/GUI/src/pages/DataModels/CreateDataModel.tsx index 18a7e935..3714d823 100644 --- a/GUI/src/pages/DataModels/CreateDataModel.tsx +++ b/GUI/src/pages/DataModels/CreateDataModel.tsx @@ -14,13 +14,13 @@ import { ErrorsType, } from 'types/dataModels'; import { da } from 'date-fns/locale'; -import { createDataModel } from 'services/datamodels'; +import { createDataModel, getProductionDataModel } from 'services/datamodels'; +import { dataModelsQueryKeys } from 'utils/queryKeys'; const CreateDataModel: FC = () => { const { t } = useTranslation(); const { open, close } = useDialog(); const navigate = useNavigate(); - const [availableProdModels, setAvailableProdModels] = useState([]); const [dataModel, setDataModel] = useState>({ modelName: '', @@ -30,6 +30,11 @@ const CreateDataModel: FC = () => { version: 'V1.0', }); + const { data: prodDataModel, isLoading: isProdDataModelLoading } = useQuery({ + queryKey: dataModelsQueryKeys.GET_PROD_DATA_MODEL(), + queryFn: () => getProductionDataModel(), + }); + const handleDataModelAttributesChange = (name: string, value: string) => { setDataModel((prevFilters) => ({ ...prevFilters, @@ -91,7 +96,16 @@ const CreateDataModel: FC = () => { connectedDsMajorVersion: Number(dataModel?.version?.split('.')[0]?.[1]) ?? "", connectedDsMinorVersion: Number(dataModel?.version?.split('.')[1]) ?? "", } + + if (prodDataModel && dataModel.deploymentEnvironment==="production") { + open({ + title: t('dataModels.createDataModel.replaceTitle'), + content: t('dataModels.createDataModel.replaceDesc'), + footer: (
    ) + }); + } else { mutation.mutate(paylod); + } }; const isCreateDisabled = () => { diff --git a/GUI/src/pages/DataModels/DataModels.scss b/GUI/src/pages/DataModels/DataModels.scss index 4fd87663..89d86e77 100644 --- a/GUI/src/pages/DataModels/DataModels.scss +++ b/GUI/src/pages/DataModels/DataModels.scss @@ -80,7 +80,12 @@ body { } .metadata-card{ - padding: 20px 150px; justify-content: center; text-align: center; + background-color:#FFE8E9 ; + color:#D73E3E; + border-radius: .3rem; + padding: 2rem; + border: 1px solid #D73E3E; + margin-bottom: 2rem; } diff --git a/GUI/src/services/datamodels.ts b/GUI/src/services/datamodels.ts index c9e61ba6..52699ed7 100644 --- a/GUI/src/services/datamodels.ts +++ b/GUI/src/services/datamodels.ts @@ -73,4 +73,11 @@ export async function configureDataModel(payload: { } const { data } = await apiDev.post(endpoint, payload); return data?.response ?? {}; +} + +export async function deleteDataModel(modelId: number | string | null) { + const { data } = await apiDev.post(dataModelsEndpoints.DELETE_MODEL(), { + modelId, + }); + return data?.response ?? {}; } \ No newline at end of file diff --git a/GUI/src/utils/endpoints.ts b/GUI/src/utils/endpoints.ts index 982a8abc..89aff5d6 100644 --- a/GUI/src/utils/endpoints.ts +++ b/GUI/src/utils/endpoints.ts @@ -55,6 +55,7 @@ export const dataModelsEndpoints = { CREATE_MODEL: (): string => '/global-classifier/datamodels/create', CREATE_MAJOR_VERSION: (): string => '/global-classifier/datamodels/major', CREATE_MINOR_VERSION: (): string => '/global-classifier/datamodels/minor', + DELETE_MODEL: (): string => '/global-classifier/datamodels/delete', GET_DATAMODELS_FILTERS: (): string => diff --git a/GUI/translations/en/common.json b/GUI/translations/en/common.json index ad5d2ffa..3c76e779 100644 --- a/GUI/translations/en/common.json +++ b/GUI/translations/en/common.json @@ -390,6 +390,8 @@ "deleteConfirmationDesc": "Confirm that you are wish to delete the following data model", "deleteModalErrorTitle": "Error deleting data model", "deleteModalErrorDesc": "There was an issue deleting the data model. Please try again. If the problem persists, contact support for assistance.", + "deleteModalSuccessTitle": "Model Deleted Successfully", + "deleteModalSuccessDesc": "You have successfully deleted the data model. The model is no longer available and all related data has been removed.", "retrainDataModalErrorTitle": "Error retraining data model", "retrainDataModalErrorDesc":"There was an issue retraining the data model. Please try again. If the problem persists, contact support for assistance." 
, "title": "Configure Data Model", From 2dac626f5435962708795fadbd5e26bd37e0ed2a Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Mon, 14 Jul 2025 10:58:44 +0530 Subject: [PATCH 090/195] training bug fixes --- .../script/train_script_starter.sh | 41 ++++++--- .../POST/check-training-job-by-model-id.sql | 18 ---- .../get-data-model-info-by-given-model-id.sql | 2 +- .../POST/get-data-model-training-job.sql | 6 -- ...ng-job.sql => get-queued-training-job.sql} | 0 ...> get-training-job-status-in-progress.sql} | 0 .../update-datamodels-training-status.sql | 18 ++++ .../POST/{datamodel => datamodels}/train.yml | 4 +- .../POST/datamodels/update-training.yml | 92 +++++++++++++++++++ ....timestamp-1752210900745-20a2e6a47a881.mjs | 69 ++++++++++++++ config.env | 6 +- sidecar.env | 2 +- src/training/scripts/constants.py | 3 + src/training/scripts/s3_utility_handler.py | 4 +- src/training/scripts/train.py | 23 ++++- src/training/scripts/utils.py | 74 ++++++++++++--- 16 files changed, 300 insertions(+), 62 deletions(-) delete mode 100644 DSL/Resql/global-classifier/POST/check-training-job-by-model-id.sql delete mode 100644 DSL/Resql/global-classifier/POST/get-data-model-training-job.sql rename DSL/Resql/global-classifier/POST/{get-first-come-training-job.sql => get-queued-training-job.sql} (100%) rename DSL/Resql/global-classifier/POST/{check-training-job-status-in-progress.sql => get-training-job-status-in-progress.sql} (100%) create mode 100644 DSL/Resql/global-classifier/POST/update-datamodels-training-status.sql rename DSL/Ruuter.private/global-classifier/POST/{datamodel => datamodels}/train.yml (96%) create mode 100644 DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml create mode 100644 GUI/vite.config.ts.timestamp-1752210900745-20a2e6a47a881.mjs diff --git a/DSL/CronManager/script/train_script_starter.sh b/DSL/CronManager/script/train_script_starter.sh index 01086c41..86530f5b 100644 --- a/DSL/CronManager/script/train_script_starter.sh +++ b/DSL/CronManager/script/train_script_starter.sh @@ -2,8 +2,8 @@ # File: DSL/CronManager/script/train_script_starter.sh # API Endpoints -CHECK_JOB_STATUS_IN_PROGRESS_SQL="http://resql:8082/global-classifier/check-training-job-status-in-progress" -GET_FIRST_COME_TRAINING_JOB_SQL="http://resql:8082/global-classifier/get-first-come-training-job" +CHECK_JOB_STATUS_IN_PROGRESS_SQL="http://resql:8082/global-classifier/get-training-job-status-in-progress" +GET_FIRST_COME_TRAINING_JOB_SQL="http://resql:8082/global-classifier/get-queued-training-job" GET_DATA_MODEL_BY_MODEL_ID_SQL="http://resql:8082/global-classifier/get-data-model-info-by-given-model-id" UPDATE_JOB_STATUS="http://resql:8082/global-classifier/update-training-job-status" @@ -38,23 +38,33 @@ if [ -z "$response_first_come_training_job" ]; then exit 0 fi -# Handle explicit "no jobs" responses from API +# Handle explicit "no jobs" responses from API - INCLUDING EMPTY ARRAY if echo "$response_first_come_training_job" | grep -q '"hasQueuedJobs":false' || \ echo "$response_first_come_training_job" | grep -q '"modelId":null' || \ echo "$response_first_come_training_job" | grep -q '"jobId":null' || \ [ "$response_first_come_training_job" = "{}" ] || \ - [ "$response_first_come_training_job" = "null" ]; then + [ "$response_first_come_training_job" = "null" ] || \ + [ "$response_first_come_training_job" = "[]" ]; then echo "ℹ️ [INFO] No queued training jobs available. Queue is empty." 
echo "✅ [DONE] Training script starter completed - no work to do" exit 0 fi +# Extract model_id and job_id model_id=$(echo "$response_first_come_training_job" | sed -E 's/.*"modelId":[[:space:]]*([0-9]+).*/\1/') job_id=$(echo "$response_first_come_training_job" | sed -E 's/.*"jobId":"?([0-9a-zA-Z-]+)"?.*/\1/') if [ -z "$model_id" ]; then echo "❌ [ERROR] Model ID not found in response" + echo "🔍 [DEBUG] Raw response: '$response_first_come_training_job'" exit 1 fi + +if [ -z "$job_id" ] || [ "$job_id" = "$response_first_come_training_job" ]; then + echo "❌ [ERROR] Job ID not found or invalid in response" + echo "🔍 [DEBUG] Raw response: '$response_first_come_training_job'" + exit 1 +fi + echo "📦 [MODEL] Model ID: $model_id" echo "📦 [JOB] Job ID: $job_id" @@ -69,15 +79,23 @@ response_get_dataset_id=$(curl -s -X POST "$GET_DATA_MODEL_BY_MODEL_ID_SQL" \ -d "{\"model_id\": $model_id}") echo "🔍 [DEBUG] Dataset ID response: '$response_get_dataset_id'" -dataset_id=$(echo "$response_get_dataset_id" | sed -E 's/.*"datasetId":"?([0-9]+)"?.*/\1/') +# Handle empty response +if [ -z "$response_get_dataset_id" ] || [ "$response_get_dataset_id" = "[]" ]; then + echo "❌ [ERROR] No dataset information found for model ID: $model_id" + exit 1 +fi + +dataset_id=$(echo "$response_get_dataset_id" | sed -E 's/.*"connectedDsId":([0-9]+).*/\1/') -if [ -z "$dataset_id" ]; then - echo "❌ [ERROR] Dataset ID not found in response" +if [ -z "$dataset_id" ] || [ "$dataset_id" = "$response_get_dataset_id" ]; then + echo "❌ [ERROR] Connected Dataset ID not found in response" + echo "🔍 [DEBUG] Raw response: '$response_get_dataset_id'" exit 1 fi + echo "📦 [DATASET] Dataset ID: $dataset_id" -base_models_json=$(echo "$response_get_dataset_id" | sed -E 's/.*"baseModels":(\[[^]]*\]).*/\1/') +base_models_json=$(echo "$response_get_dataset_id" | sed -nE 's/.*"value":"(\[[^]]+\])".*/\1/p' | sed 's/\\"/"/g') if [[ "$base_models_json" == "["* ]] && [[ "$base_models_json" == *"]" ]]; then model_types="$base_models_json" @@ -89,13 +107,6 @@ else exit 1 fi -# Validate that model_types is not empty -if [[ -z "$model_types" ]] || [[ "$model_types" == "[]" ]]; then - echo "❌ [ERROR] No valid base models found in response" - echo "❌ [ERROR] Using fallback model types" - model_types='["bert","roberta"]' # Fallback -fi - # Activate existing virtualenv echo "✅ Activating existing virtualenv at /app/python_virtual_env" source /app/python_virtual_env/bin/activate || { echo "❌ Failed to activate virtualenv"; exit 1; } diff --git a/DSL/Resql/global-classifier/POST/check-training-job-by-model-id.sql b/DSL/Resql/global-classifier/POST/check-training-job-by-model-id.sql deleted file mode 100644 index 27b63f70..00000000 --- a/DSL/Resql/global-classifier/POST/check-training-job-by-model-id.sql +++ /dev/null @@ -1,18 +0,0 @@ -SELECT - EXISTS(SELECT 1 FROM ModelTrainingJobs WHERE model_id = :modelId) AS job_exists, - COALESCE( - (SELECT job_status - FROM ModelTrainingJobs - WHERE model_id = :modelId - ORDER BY created_at DESC - LIMIT 1), - 'no-job' - ) AS current_status, - COALESCE( - (SELECT job_id - FROM ModelTrainingJobs - WHERE model_id = :modelId - ORDER BY created_at DESC - LIMIT 1), - NULL - ) AS latest_job_id; \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/get-data-model-info-by-given-model-id.sql b/DSL/Resql/global-classifier/POST/get-data-model-info-by-given-model-id.sql index 656e5b21..4fec7683 100644 --- a/DSL/Resql/global-classifier/POST/get-data-model-info-by-given-model-id.sql +++ 
b/DSL/Resql/global-classifier/POST/get-data-model-info-by-given-model-id.sql @@ -1,5 +1,5 @@ SELECT - dataset_id, + connected_ds_id, base_models FROM data_models WHERE model_id = :model_id; \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/get-data-model-training-job.sql b/DSL/Resql/global-classifier/POST/get-data-model-training-job.sql deleted file mode 100644 index 47a15a3e..00000000 --- a/DSL/Resql/global-classifier/POST/get-data-model-training-job.sql +++ /dev/null @@ -1,6 +0,0 @@ -SELECT - EXISTS( - SELECT 1 - FROM ModelTrainingJobs - WHERE job_status = 'training-in-progress' - ) AS has_training_in_progress; \ No newline at end of file diff --git a/DSL/Resql/global-classifier/POST/get-first-come-training-job.sql b/DSL/Resql/global-classifier/POST/get-queued-training-job.sql similarity index 100% rename from DSL/Resql/global-classifier/POST/get-first-come-training-job.sql rename to DSL/Resql/global-classifier/POST/get-queued-training-job.sql diff --git a/DSL/Resql/global-classifier/POST/check-training-job-status-in-progress.sql b/DSL/Resql/global-classifier/POST/get-training-job-status-in-progress.sql similarity index 100% rename from DSL/Resql/global-classifier/POST/check-training-job-status-in-progress.sql rename to DSL/Resql/global-classifier/POST/get-training-job-status-in-progress.sql diff --git a/DSL/Resql/global-classifier/POST/update-datamodels-training-status.sql b/DSL/Resql/global-classifier/POST/update-datamodels-training-status.sql new file mode 100644 index 00000000..db61f738 --- /dev/null +++ b/DSL/Resql/global-classifier/POST/update-datamodels-training-status.sql @@ -0,0 +1,18 @@ +UPDATE public.data_models +SET + training_status = 'trained'::training_status, + training_results = :trainingResults::jsonb, + model_s3_location = :modelS3Location::text, + last_trained = CURRENT_TIMESTAMP, + updated_timestamp = CURRENT_TIMESTAMP +WHERE + model_id = :modelId +RETURNING + model_id, + model_group_key, + model_name, + major, + minor, + training_status, + training_results, + updated_timestamp; \ No newline at end of file diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodel/train.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/train.yml similarity index 96% rename from DSL/Ruuter.private/global-classifier/POST/datamodel/train.yml rename to DSL/Ruuter.private/global-classifier/POST/datamodels/train.yml index d18e2675..1b0fb37f 100644 --- a/DSL/Ruuter.private/global-classifier/POST/datamodel/train.yml +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/train.yml @@ -62,7 +62,9 @@ assign_fail_response: return_ok: status: 200 return: ${success_format_res} + next: end return_bad_request: status: 400 - return: ${fail_format_res} \ No newline at end of file + return: ${fail_format_res} + next: end \ No newline at end of file diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml new file mode 100644 index 00000000..904916e1 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml @@ -0,0 +1,92 @@ +declaration: + call: declare + version: 0.1 + description: "Update data model training status to trained and set training results" + method: post + accepts: json + returns: json + namespace: global-classifier + allowlist: + body: + - field: modelId + type: number + description: "Model ID to update (required)" + - field: trainingResults + type: object + description: "Training results object (required)" + 
- field: modelS3Location + type: string + description: "S3 location/key of the trained model (required)" + next: extractRequestData + +# Data Extraction +extractRequestData: + assign: + modelId: ${incoming.body.modelId} + trainingResults: ${incoming.body.trainingResults} + modelS3Location: ${incoming.body.modelS3Location} + next: validateRequiredFields + +# Required Field Validation +validateRequiredFields: + switch: + - condition: ${modelId === null || modelId === undefined || !modelId} + next: return_missing_model_id + - condition: ${trainingResults === null || trainingResults === undefined} + next: return_missing_training_results + - condition: ${modelS3Location === null || modelS3Location === undefined || modelS3Location === ""} + next: return_missing_model_s3_location + next: updateTrainingStatus + +# Update Training Status +updateTrainingStatus: + call: http.post + args: + url: "[#GLOBAL_CLASSIFIER_RESQL]/update-datamodels-training-status" + body: + modelId: ${modelId} + trainingResults: ${JSON.stringify(trainingResults)} + modelS3Location: ${modelS3Location} + result: update_result + next: checkUpdateResult + +# Check Update Result +checkUpdateResult: + switch: + - condition: ${!update_result || !update_result.response || !update_result.response.body} + next: return_update_failed + - condition: ${update_result.response.body.length === 0} + next: return_model_not_found + next: return_success + +# Success Response +return_success: + return: "Training status updated successfully model ID : ${update_result.response.body[0].modelId}" + status: 200 + next: end + +# Error Responses +return_missing_model_id: + return: '{"error": "Missing required field: modelId"}' + status: 400 + next: end + +return_missing_training_results: + return: '{"error": "Missing required field: trainingResults"}' + status: 400 + next: end + +return_missing_model_s3_location: + return: '{"error": "Missing required field: modelS3Location"}' + status: 400 + next: end + +return_model_not_found: + return: '{"error": "Model not found with the provided modelId"}' + status: 404 + next: end + +return_update_failed: + return: '{"error": "Failed to update training status"}' + status: 500 + next: end \ No newline at end of file diff --git a/GUI/vite.config.ts.timestamp-1752210900745-20a2e6a47a881.mjs b/GUI/vite.config.ts.timestamp-1752210900745-20a2e6a47a881.mjs new file mode 100644 index 00000000..10db9fde --- /dev/null +++ b/GUI/vite.config.ts.timestamp-1752210900745-20a2e6a47a881.mjs @@ -0,0 +1,69 @@ +// vite.config.ts +import { defineConfig } from "file:///app/node_modules/vite/dist/node/index.js"; +import react from "file:///app/node_modules/@vitejs/plugin-react/dist/index.mjs"; +import tsconfigPaths from "file:///app/node_modules/vite-tsconfig-paths/dist/index.mjs"; +import svgr from "file:///app/node_modules/vite-plugin-svgr/dist/index.mjs"; +import path from "path"; + +// vitePlugin.js +function removeHiddenMenuItems(str) { + var _a, _b; + const badJson = str.replace("export default [", "[").replace("];", "]"); + const correctJson = badJson.replace(/(['"])?([a-z0-9A-Z_]+)(['"])?:/g, '"$2": '); + const isHiddenFeaturesEnabled = ((_a = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _a.toLowerCase().trim()) == "true" || ((_b = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? 
void 0 : _b.toLowerCase().trim()) == "1"; + const json = removeHidden(JSON.parse(correctJson), isHiddenFeaturesEnabled); + const updatedJson = JSON.stringify(json); + return "export default " + updatedJson + ";"; +} +function removeHidden(menuItems, isHiddenFeaturesEnabled) { + var _a; + if (!menuItems) + return menuItems; + const arr = (_a = menuItems == null ? void 0 : menuItems.filter((x) => !x.hidden)) == null ? void 0 : _a.filter((x) => isHiddenFeaturesEnabled || x.hiddenMode !== "production"); + for (const a of arr) { + a.children = removeHidden(a.children, isHiddenFeaturesEnabled); + } + return arr; +} + +// vite.config.ts +var __vite_injected_original_dirname = "/app"; +var vite_config_default = defineConfig({ + envPrefix: "REACT_APP_", + plugins: [ + react(), + tsconfigPaths(), + svgr(), + { + name: "removeHiddenMenuItemsPlugin", + transform: (str, id) => { + if (!id.endsWith("/menu-structure.json")) + return str; + return removeHiddenMenuItems(str); + } + } + ], + base: "global-classifier", + build: { + outDir: "./build", + target: "es2015", + emptyOutDir: true + }, + server: { + headers: { + ...process.env.REACT_APP_CSP && { + "Content-Security-Policy": process.env.REACT_APP_CSP + } + } + }, + resolve: { + alias: { + "~@fontsource": path.resolve(__vite_injected_original_dirname, "node_modules/@fontsource"), + "@": `${path.resolve(__vite_injected_original_dirname, "./src")}` + } + } +}); +export { + vite_config_default as default +};
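For orientation, the compiled removeHidden above filters a menu tree on two flags; a minimal illustration (menu items invented for the example, not taken from the repo):

// Illustrative menu input for removeHidden (example data only):
const menu = [
  { label: "Datasets", children: [] },
  { label: "Debug", hidden: true },
  { label: "Experiments", hiddenMode: "production", children: [] },
];
// removeHidden(menu, false) keeps only "Datasets": "Debug" is dropped for
// hidden: true, and "Experiments" for hiddenMode: "production".
// removeHidden(menu, true) keeps both "Datasets" and "Experiments".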
dGFfdXJsID0gXCJmaWxlOi8vL2FwcC92aXRlUGx1Z2luLmpzXCI7ZXhwb3J0IGZ1bmN0aW9uIHJlbW92ZUhpZGRlbk1lbnVJdGVtcyhzdHIpIHtcbiAgY29uc3QgYmFkSnNvbiA9IHN0ci5yZXBsYWNlKCdleHBvcnQgZGVmYXVsdCBbJywgJ1snKS5yZXBsYWNlKCddOycsICddJyk7XG4gIGNvbnN0IGNvcnJlY3RKc29uID0gYmFkSnNvbi5yZXBsYWNlKC8oWydcIl0pPyhbYS16MC05QS1aX10rKShbJ1wiXSk/Oi9nLCAnXCIkMlwiOiAnKTtcblxuIGNvbnN0IGlzSGlkZGVuRmVhdHVyZXNFbmFibGVkID0gXG4gICAgcHJvY2Vzcy5lbnYuUkVBQ1RfQVBQX0VOQUJMRV9ISURERU5fRkVBVFVSRVM/LnRvTG93ZXJDYXNlKCkudHJpbSgpID09ICd0cnVlJyB8fFxuICAgIHByb2Nlc3MuZW52LlJFQUNUX0FQUF9FTkFCTEVfSElEREVOX0ZFQVRVUkVTPy50b0xvd2VyQ2FzZSgpLnRyaW0oKSA9PSAnMSc7XG5cbiAgY29uc3QganNvbiA9IHJlbW92ZUhpZGRlbihKU09OLnBhcnNlKGNvcnJlY3RKc29uKSwgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQpO1xuICBcbiAgY29uc3QgdXBkYXRlZEpzb24gPSBKU09OLnN0cmluZ2lmeShqc29uKTtcblxuICByZXR1cm4gJ2V4cG9ydCBkZWZhdWx0ICcgKyB1cGRhdGVkSnNvbiArICc7J1xufVxuXG5mdW5jdGlvbiByZW1vdmVIaWRkZW4obWVudUl0ZW1zLCBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCkge1xuICBpZighbWVudUl0ZW1zKSByZXR1cm4gbWVudUl0ZW1zO1xuICBjb25zdCBhcnIgPSBtZW51SXRlbXNcbiAgICA/LmZpbHRlcih4ID0+ICF4LmhpZGRlbilcbiAgICA/LmZpbHRlcih4ID0+IGlzSGlkZGVuRmVhdHVyZXNFbmFibGVkIHx8IHguaGlkZGVuTW9kZSAhPT0gXCJwcm9kdWN0aW9uXCIpO1xuICBmb3IgKGNvbnN0IGEgb2YgYXJyKSB7XG4gICAgYS5jaGlsZHJlbiA9IHJlbW92ZUhpZGRlbihhLmNoaWxkcmVuLCBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCk7XG4gIH1cbiAgcmV0dXJuIGFycjtcbn1cbiJdLAogICJtYXBwaW5ncyI6ICI7QUFBOEwsU0FBUyxvQkFBb0I7QUFDM04sT0FBTyxXQUFXO0FBQ2xCLE9BQU8sbUJBQW1CO0FBQzFCLE9BQU8sVUFBVTtBQUNqQixPQUFPLFVBQVU7OztBQ0prTCxTQUFTLHNCQUFzQixLQUFLO0FBQXZPO0FBQ0UsUUFBTSxVQUFVLElBQUksUUFBUSxvQkFBb0IsR0FBRyxFQUFFLFFBQVEsTUFBTSxHQUFHO0FBQ3RFLFFBQU0sY0FBYyxRQUFRLFFBQVEsbUNBQW1DLFFBQVE7QUFFaEYsUUFBTSw0QkFDSCxhQUFRLElBQUkscUNBQVosbUJBQThDLGNBQWMsV0FBVSxZQUN0RSxhQUFRLElBQUkscUNBQVosbUJBQThDLGNBQWMsV0FBVTtBQUV4RSxRQUFNLE9BQU8sYUFBYSxLQUFLLE1BQU0sV0FBVyxHQUFHLHVCQUF1QjtBQUUxRSxRQUFNLGNBQWMsS0FBSyxVQUFVLElBQUk7QUFFdkMsU0FBTyxvQkFBb0IsY0FBYztBQUMzQztBQUVBLFNBQVMsYUFBYSxXQUFXLHlCQUF5QjtBQWYxRDtBQWdCRSxNQUFHLENBQUM7QUFBVyxXQUFPO0FBQ3RCLFFBQU0sT0FBTSw0Q0FDUixPQUFPLE9BQUssQ0FBQyxFQUFFLFlBRFAsbUJBRVIsT0FBTyxPQUFLLDJCQUEyQixFQUFFLGVBQWU7QUFDNUQsYUFBVyxLQUFLLEtBQUs7QUFDbkIsTUFBRSxXQUFXLGFBQWEsRUFBRSxVQUFVLHVCQUF1QjtBQUFBLEVBQy9EO0FBQ0EsU0FBTztBQUNUOzs7QUR4QkEsSUFBTSxtQ0FBbUM7QUFRekMsSUFBTyxzQkFBUSxhQUFhO0FBQUEsRUFDMUIsV0FBVztBQUFBLEVBQ1gsU0FBUztBQUFBLElBQ1AsTUFBTTtBQUFBLElBQ04sY0FBYztBQUFBLElBQ2QsS0FBSztBQUFBLElBQ0w7QUFBQSxNQUNFLE1BQU07QUFBQSxNQUNOLFdBQVcsQ0FBQyxLQUFLLE9BQU87QUFDdEIsWUFBRyxDQUFDLEdBQUcsU0FBUyxzQkFBc0I7QUFDcEMsaUJBQU87QUFDVCxlQUFPLHNCQUFzQixHQUFHO0FBQUEsTUFDbEM7QUFBQSxJQUNGO0FBQUEsRUFDRjtBQUFBLEVBQ0EsTUFBTTtBQUFBLEVBQ04sT0FBTztBQUFBLElBQ0wsUUFBUTtBQUFBLElBQ1IsUUFBUTtBQUFBLElBQ1IsYUFBYTtBQUFBLEVBQ2Y7QUFBQSxFQUNBLFFBQVE7QUFBQSxJQUNOLFNBQVM7QUFBQSxNQUNQLEdBQUksUUFBUSxJQUFJLGlCQUFpQjtBQUFBLFFBQy9CLDJCQUEyQixRQUFRLElBQUk7QUFBQSxNQUN6QztBQUFBLElBQ0Y7QUFBQSxFQUNGO0FBQUEsRUFDQSxTQUFTO0FBQUEsSUFDUCxPQUFPO0FBQUEsTUFDTCxnQkFBZ0IsS0FBSyxRQUFRLGtDQUFXLDBCQUEwQjtBQUFBLE1BQ2xFLEtBQUssR0FBRyxLQUFLLFFBQVEsa0NBQVcsT0FBTyxDQUFDO0FBQUEsSUFDMUM7QUFBQSxFQUNGO0FBQ0YsQ0FBQzsiLAogICJuYW1lcyI6IFtdCn0K diff --git a/config.env b/config.env index e57a60da..b4556b38 100644 --- a/config.env +++ b/config.env @@ -2,9 +2,9 @@ API_CORS_ORIGIN=* API_DOCUMENTATION_ENABLED=true S3_REGION=eu-west-1 S3_ENDPOINT_URL=http://minio:9000 -S3_DATA_BUCKET_PATH=datasets/ +S3_DATA_BUCKET_PATH=resources/ S3_DATA_BUCKET_NAME=global-classifier FS_DATA_DIRECTORY_PATH=/app -S3_SECRET_ACCESS_KEY=value -S3_ACCESS_KEY_ID=value +S3_SECRET_ACCESS_KEY=LEUClnCvParcv60pRehx9t4dfWbSFY4sBkMEqNy8 +S3_ACCESS_KEY_ID=3cNK2fECOylug9wACgEx 
PORT=3000 \ No newline at end of file diff --git a/sidecar.env b/sidecar.env index 7c49bc0b..dda8fe4a 100644 --- a/sidecar.env +++ b/sidecar.env @@ -1,6 +1,6 @@ # MLFLOW MLFLOW_TRACKING_USERNAME=mlflowadmin -MLFLOW_TRACKING_PASSWORD=value +MLFLOW_TRACKING_PASSWORD=mlflowadmin MLFLOW_HOST_PORT=5000 MLFLOW_CONT_PORT=5000 MLFLOW_HOST=0.0.0.0 diff --git a/src/training/scripts/constants.py b/src/training/scripts/constants.py index be9a9a61..16fd4981 100644 --- a/src/training/scripts/constants.py +++ b/src/training/scripts/constants.py @@ -35,3 +35,6 @@ TRAINING_JOB_STATUS_UPDATE_URL = ( "http://resql:8082/global-classifier/update-training-job-status" ) +DATA_MODEL_TRAINING_UPDATE_URL = ( + "http://localhost:8088/global-classifier/datamodels/update-training" +) diff --git a/src/training/scripts/s3_utility_handler.py b/src/training/scripts/s3_utility_handler.py index ff681460..3e8ac982 100644 --- a/src/training/scripts/s3_utility_handler.py +++ b/src/training/scripts/s3_utility_handler.py @@ -78,7 +78,7 @@ def download_aggregated_dataset(self, dataset_id: str) -> str: logger.info(f"Downloading aggregated dataset for dataset ID: {dataset_id}") # Define paths - s3_source_path = f"{dataset_id}/aggregated_dataset.json" + s3_source_path = f"datasets/{dataset_id}/aggregated_dataset.json" local_dest_path = ( f"src/training/dataset_artifacts/training_datasets/{dataset_id}.json" ) @@ -144,7 +144,7 @@ def upload_trained_model( self._zip_directory(model_dir, zip_path) # Upload to S3 - s3_dest_path = f"trained_models/{model_id}/{zip_filename}" + s3_dest_path = f"models/{model_id}/{zip_filename}" logger.info(f"Uploading zipped model to S3: {s3_dest_path}") diff --git a/src/training/scripts/train.py b/src/training/scripts/train.py index 8e339b61..62c6498d 100644 --- a/src/training/scripts/train.py +++ b/src/training/scripts/train.py @@ -23,6 +23,7 @@ measure_inference_time, evaluate, update_job_status, + update_data_model_training, ) import mlflow @@ -778,7 +779,7 @@ def train_multiple_models(args): ) logger.info(f"{'=' * 60}") - return preprocessed_result_payload + return preprocessed_result_payload, s3_model_path except Exception as e: logger.error(f"❌ Critical error in train_multiple_models: {str(e)}") raise # raise the exception so main() can handle it @@ -979,7 +980,7 @@ def main(): MODEL_CONFIG["other"] = {"name": args.model_name, "max_length": 128} # Train multiple models - preprocessed_result_payload = train_multiple_models(args) + preprocessed_result_payload, s3_model_path = train_multiple_models(args) # Check if training was successful if not preprocessed_result_payload or not preprocessed_result_payload.get( @@ -999,9 +1000,23 @@ def main(): logger.error("❌ Training failed: No models were successfully trained") sys.exit(1) - # ======================TO DO: training results in DB======================== logger.info(f"Preprocessed result payload: {preprocessed_result_payload}") - # =========================================================================== + + model_s3_location = s3_model_path + + # Send training results to API for database storage + logger.info("📤 Sending training results to data model table...") + api_success = update_data_model_training( + model_id=args.model_id, + training_results=preprocessed_result_payload, + model_s3_location=model_s3_location + ) + + if not api_success: + logger.warning("⚠️ Failed to send training results to API, but continuing with job completion...") + # Note: We don't exit here as the training was successful, just the API call failed + else: + logger.info("✅ 
Training results successfully sent to database") # Update job status to trained job_status = update_job_status(job_id=args.job_id, status="trained") diff --git a/src/training/scripts/utils.py b/src/training/scripts/utils.py index 63c969fb..b6d7bb1e 100644 --- a/src/training/scripts/utils.py +++ b/src/training/scripts/utils.py @@ -9,6 +9,7 @@ import numpy as np import requests import os +import json import sys from loguru import logger from typing import Dict, Any @@ -25,6 +26,7 @@ RETENTION_PERIOD, LOG_FILE_HANDLER_FORMAT, TRAINING_JOB_STATUS_UPDATE_URL, + DATA_MODEL_TRAINING_UPDATE_URL, SEED, ) @@ -140,10 +142,10 @@ def compute_metrics(predictions, labels, probs, class_names=None): # Build metrics dictionary - FLAT structure with meaningful names metrics = { - "accuracy": accuracy, - "precision": precision, - "recall": recall, - "f1": f1, + "accuracy": float(accuracy), + "precision": float(precision), + "recall": float(recall), + "f1": float(f1), } # Add per-class metrics directly to main dictionary (no nesting) @@ -156,16 +158,16 @@ def compute_metrics(predictions, labels, probs, class_names=None): class_key = f"class_{i}" # Add metrics directly to main dictionary - metrics[f"{class_key}_precision"] = precision_per_class[i] - metrics[f"{class_key}_recall"] = recall_per_class[i] - metrics[f"{class_key}_f1"] = f1_per_class[i] - metrics[f"{class_key}_accuracy"] = per_class_accuracy[i] + metrics[f"{class_key}_precision"] = float(precision_per_class[i]) + metrics[f"{class_key}_recall"] = float(recall_per_class[i]) + metrics[f"{class_key}_f1"] = float(f1_per_class[i]) + metrics[f"{class_key}_accuracy"] = float(per_class_accuracy[i]) # ROC AUC for binary/multiclass if len(np.unique(labels)) == 2: - metrics["roc_auc"] = roc_auc_score(labels, probs[:, 1]) + metrics["roc_auc"] = float(roc_auc_score(labels, probs[:, 1])) else: - metrics["roc_auc"] = roc_auc_score(labels, probs, multi_class="ovr") + metrics["roc_auc"] = float(roc_auc_score(labels, probs, multi_class="ovr")) return metrics, cm @@ -193,6 +195,56 @@ def update_job_status(job_id: int, status: str) -> bool: logger.error(f"Error updating job status: {str(e)}") return False +def update_data_model_training(model_id: int, training_results: dict, model_s3_location: str) -> bool: + """ + Send training results to the API endpoint for database storage. 
+ + Args: + model_id (int): The model ID + training_results (dict): Preprocessed training results payload + + Returns: + bool: True if successful, False otherwise + """ + try: + api_url = DATA_MODEL_TRAINING_UPDATE_URL + + payload = { + "modelId": model_id, + "trainingResults": training_results, + "modelS3Location": model_s3_location + } + + logger.info(f"Sending training results to API for model ID: {model_id}") + logger.debug(f"API URL: {api_url}") + logger.debug(f"Payload size: {len(json.dumps(payload))} characters") + + # Send POST request + response = requests.post( + api_url, + json=payload, + headers={ + "Content-Type": "application/json" + } + ) + + # Check response + if response.status_code == 200: + logger.info("✅ Training results successfully sent to API") + logger.debug(f"API Response: {response.text}") + return True + else: + logger.error(f"❌ API request failed with status code: {response.status_code}") + logger.error(f"Response text: {response.text}") + return False + + except requests.exceptions.RequestException as e: + logger.error(f"❌ Network error when sending training results to API: {str(e)}") + return False + except Exception as e: + logger.error(f"❌ Unexpected error when sending training results to API: {str(e)}") + return False + def preprocess_training_summary_from_dict( training_summary_dict: Dict[str, Any], @@ -417,4 +469,4 @@ def measure_inference_time(model, dataloader, device, num_runs=100): times.append(end_time - start_time) - return np.mean(times) + return float(np.mean(times)) From 7d7fc50a47399ed8ae69da0aed262ee6bc0983dc Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Mon, 14 Jul 2025 20:57:13 +0530 Subject: [PATCH 091/195] pr comments --- GUI/src/pages/DataModels/ConfigureDataModel.tsx | 9 +-------- GUI/src/pages/DataModels/CreateDataModel.tsx | 2 +- GUI/src/pages/DataModels/DataModels.scss | 15 ++++++++------- GUI/src/pages/ViewDataset/index.tsx | 2 -- 4 files changed, 10 insertions(+), 18 deletions(-) diff --git a/GUI/src/pages/DataModels/ConfigureDataModel.tsx b/GUI/src/pages/DataModels/ConfigureDataModel.tsx index 57e84b4a..65e24ff4 100644 --- a/GUI/src/pages/DataModels/ConfigureDataModel.tsx +++ b/GUI/src/pages/DataModels/ConfigureDataModel.tsx @@ -246,14 +246,7 @@ const ConfigureDataModel: FC = () => { > {t('dataModels.configureDataModel.deleteModal')} - +
    ) + footer: (
) }); } else { mutation.mutate(paylod); diff --git a/GUI/src/pages/DataModels/DataModels.scss index 89d86e77..49197f97 100644 --- a/GUI/src/pages/DataModels/DataModels.scss +++ b/GUI/src/pages/DataModels/DataModels.scss @@ -32,6 +32,7 @@ body { transform: scale(1); opacity: 0; } + 100% { transform: scale(1.02); opacity: 1; @@ -58,19 +59,19 @@ body { margin: 30px 0px; } -.models-filter-div{ +.models-filter-div { display: flex; flex-wrap: wrap; gap: 16px; width: 100%; } -.filter-buttons{ +.filter-buttons { display: flex; gap: 16px; } -.data-model-buttons{ +.data-model-buttons { align-items: end; gap: 10px; justify-content: end; @@ -79,13 +80,13 @@ body { background-color: white; } -.metadata-card{ +.metadata-card { justify-content: center; text-align: center; - background-color:#FFE8E9 ; - color:#D73E3E; + background-color: #FFE8E9; + color: #D73E3E; border-radius: .3rem; padding: 2rem; border: 1px solid #D73E3E; margin-bottom: 2rem; -} +} \ No newline at end of file diff --git a/GUI/src/pages/ViewDataset/index.tsx index b0e58d3f..4ec7ba6f 100644 --- a/GUI/src/pages/ViewDataset/index.tsx +++ b/GUI/src/pages/ViewDataset/index.tsx @@ -184,8 +184,6 @@ const ViewDataset = () => { }; console.log(payload, 'minorUpdatePayload'); }; - - console.log(metadata); return ( From 2f26d11f2c9c4db7f95867f523a2a5a80c4bbcc8 Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Tue, 15 Jul 2025 07:17:13 +0530 Subject: [PATCH 092/195] complete training pipeline with testing --- DSL/CronManager/script/callback_format_v1.sh | 101 ------------------ .../script/train_script_starter.sh | 2 +- .../POST/datamodels/create.yml | 12 ++- .../POST/datamodels/major.yml | 10 +- .../POST/datamodels/minor.yml | 10 +- .../POST/datamodels/update-training.yml | 0 config.env | 4 +- sidecar.env | 2 +- src/training/scripts/constants.py | 2 +- src/training/scripts/train.py | 21 ++-- src/training/scripts/utils.py | 67 +++++++++--- 11 files changed, 87 insertions(+), 144 deletions(-) delete mode 100755 DSL/CronManager/script/callback_format_v1.sh rename DSL/{Ruuter.private => Ruuter.public}/global-classifier/POST/datamodels/update-training.yml (100%) diff --git a/DSL/CronManager/script/callback_format_v1.sh b/DSL/CronManager/script/callback_format_v1.sh deleted file mode 100755 index 2022bb65..00000000 --- a/DSL/CronManager/script/callback_format_v1.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/bin/bash - -echo "Started Shell Script for Dataset Generation Callback Processing" - -# Check if environment variables are set -if [ -z "$filePath" ] || [ -z "$results" ]; then - echo "Please set the filePath and results environment variables." - exit 1 -fi - -# Logging function -log() { - echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" -} - -log "Dataset generation callback processing started" -log "File path: $filePath" -log "Encoded results length: ${#results} characters" - -# Extract dataset ID from file path for logging -dataset_id=$(echo "$filePath" | grep -o '/[^/]*\.json$' | sed 's|/\([^/]*\)\.json$|\1|' || echo "unknown") -log "Extracted dataset ID: $dataset_id" - -# API endpoint for processing generation callback -API_URL="http://s3-dataset-processor:8001/process-generation-callback" - -log "🔍 Calling S3 Dataset Processor API to process generation callback..." 
- -# Call the API to process generation callback (background processing) -response=$(curl -s -o /tmp/callback_response_body.txt -w "%{http_code}" -X POST "$API_URL" \ - -H "Content-Type: application/json" \ - -d "{\"file_path\":\"$filePath\", \"results\":\"$results\"}") - -http_code="$response" -response_body=$(cat /tmp/callback_response_body.txt) - -log "🔍 HTTP Status Code: $http_code" -log "🔍 Response Body: $response_body" - -# Check if API call was successful (should get 200 immediately) -if [ "$http_code" = "200" ] && [ -n "$response_body" ]; then - log "✅ Callback processing request accepted successfully" - - # Parse the response to get status information - if command -v jq >/dev/null 2>&1; then - # Use jq if available - status=$(echo "$response_body" | jq -r '.status // "unknown"') - message=$(echo "$response_body" | jq -r '.message // "unknown"') - - log "📊 Callback Processing Status:" - log " - Status: $status" - log " - Message: $message" - log " - Dataset ID: $dataset_id" - - else - # Fallback parsing without jq - log "⚠️ jq not available, using grep/sed for parsing" - - status=$(echo "$response_body" | grep -o '"status":"[^"]*"' | sed 's/.*"status":"\([^"]*\)".*/\1/' || echo "unknown") - message=$(echo "$response_body" | grep -o '"message":"[^"]*"' | sed 's/.*"message":"\([^"]*\)".*/\1/' || echo "unknown") - - log "📊 Callback Processing Status:" - log " - Status: $status" - log " - Message: $message" - log " - Dataset ID: $dataset_id" - fi - - # Check if callback processing was accepted - if [ "$status" = "accepted" ]; then - log "✅ Dataset generation callback submitted for background processing" - log "🔄 Background task will create the following payload structure:" - log " - agencies: [{agencyId: X, syncStatus: Synced_with_CKB/Sync_with_CKB_Failed}, ...]" - log " - datasetId: $dataset_id" - log " - generationStatus: Generation_Success/Generation_Failed" - - log "📋 Note: Actual callback processing is happening in the background" - log "📋 Check the S3 processor service logs for detailed processing results" - - else - log "⚠️ Unexpected status received: $status" - log "⚠️ Message: $message" - fi - -else - log "❌ Callback processing request failed" - log "HTTP Status: $http_code" - log "Response: $response_body" - - # Clean up temp files - rm -f /tmp/callback_response_body.txt - exit 1 -fi - -# Clean up temp files -rm -f /tmp/callback_response_body.txt - -log "✅ Dataset generation callback processing completed successfully" -log "📋 Summary: Dataset ID: $dataset_id, Request Status: $status" -log "📋 Background processing will generate the final callback payload" - -exit 0 \ No newline at end of file diff --git a/DSL/CronManager/script/train_script_starter.sh b/DSL/CronManager/script/train_script_starter.sh index 86530f5b..6cf4f7e0 100644 --- a/DSL/CronManager/script/train_script_starter.sh +++ b/DSL/CronManager/script/train_script_starter.sh @@ -180,7 +180,7 @@ training_output_dir="${TRAINING_OUTPUT_DIR}/model_${model_id}" mkdir -p "$training_output_dir" # Set default training parameters (can be made configurable) -max_seq_length=256 +max_seq_length=128 num_epochs=3 batch_size=8 learning_rate=2e-5 diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml index facd9926..72d9cb8a 100644 --- a/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/create.yml @@ -105,9 +105,15 @@ initiateTraining: call: http.post args: url: 
"[#GLOBAL_CLASSIFIER_RUUTER_PRIVATE]/datamodels/train" + headers: + cookie: ${incoming.headers.cookie} body: - modelId: ${insert_model_res.response.body[0].modelId} + modelId: ${insert_model_res.response.body[0].modelId} result: training_res + next: logTrainingResult + +logTrainingResult: + log: "Initiate training response: ${training_res.response}" next: checkTrainingResult # Check Training Result @@ -115,12 +121,12 @@ checkTrainingResult: switch: - condition: ${!training_res || !training_res.response} next: return_training_failed - - condition: ${training_res.response.body[0].operationSuccessful !== true} + - condition: ${training_res.response.body.response.operationSuccessful !== true} next: return_training_failed next: return_success return_success: - return: "Data model created successfully and training initiated. Model ID:${insert_model_res.response.body[0].modelId} Job ID: ${training_res.response.body[0].jobId}" + return: "Data model created successfully and training initiated. Model ID:${insert_model_res.response.body[0].modelId} Job ID: ${training_res.response.body.response.jobId}" status: 200 next: end diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml index e3c955a0..73ae581f 100644 --- a/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/major.yml @@ -177,9 +177,15 @@ initiateTraining: call: http.post args: url: "[#GLOBAL_CLASSIFIER_RUUTER_PRIVATE]/datamodels/train" + headers: + cookie: ${incoming.headers.cookie} body: modelId: ${insertResult.response.body[0].modelId} result: training_res + next: logTrainingResult + +logTrainingResult: + log: "Initiate training response: ${training_res.response}" next: checkTrainingResult # Check Training Result @@ -187,12 +193,12 @@ checkTrainingResult: switch: - condition: ${!training_res || !training_res.response} next: return_training_failed - - condition: ${training_res.response.body[0].operationSuccessful !== true} + - condition: ${training_res.response.body.response.operationSuccessful !== true} next: return_training_failed next: return_success return_success: - return: "Data model created successfully and training initiated. Model ID:${insertResult.response.body[0].modelId} Job ID: ${training_res.response.body[0].jobId}" + return: "Data model created successfully and training initiated. 
Model ID:${insertResult.response.body[0].modelId} Job ID: ${training_res.response.body.response.jobId}" status: 200 next: end diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml b/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml index ce221465..b53c02b0 100644 --- a/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml +++ b/DSL/Ruuter.private/global-classifier/POST/datamodels/minor.yml @@ -169,9 +169,15 @@ initiateTraining: call: http.post args: url: "[#GLOBAL_CLASSIFIER_RUUTER_PRIVATE]/datamodels/train" + headers: + cookie: ${incoming.headers.cookie} body: modelId: ${insertResult.response.body[0].modelId} result: training_res + next: logTrainingResult + +logTrainingResult: + log: "Initiate training response: ${training_res.response}" next: checkTrainingResult # Check Training Result @@ -179,12 +185,12 @@ checkTrainingResult: switch: - condition: ${!training_res || !training_res.response} next: return_training_failed - - condition: ${training_res.response.body[0].operationSuccessful !== true} + - condition: ${training_res.response.body.response.operationSuccessful !== true} next: return_training_failed next: return_success return_success: - return: "Data model created successfully and training initiated. Model ID:${insertResult.response.body[0].modelId} Job ID: ${training_res.response.body[0].jobId}" + return: "Data model created successfully and training initiated. Model ID:${insertResult.response.body[0].modelId} Job ID: ${training_res.response.body.response.jobId}" status: 200 next: end diff --git a/DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml b/DSL/Ruuter.public/global-classifier/POST/datamodels/update-training.yml similarity index 100% rename from DSL/Ruuter.private/global-classifier/POST/datamodels/update-training.yml rename to DSL/Ruuter.public/global-classifier/POST/datamodels/update-training.yml diff --git a/config.env b/config.env index b4556b38..e4368fab 100644 --- a/config.env +++ b/config.env @@ -5,6 +5,6 @@ S3_ENDPOINT_URL=http://minio:9000 S3_DATA_BUCKET_PATH=resources/ S3_DATA_BUCKET_NAME=global-classifier FS_DATA_DIRECTORY_PATH=/app -S3_SECRET_ACCESS_KEY=LEUClnCvParcv60pRehx9t4dfWbSFY4sBkMEqNy8 -S3_ACCESS_KEY_ID=3cNK2fECOylug9wACgEx +S3_SECRET_ACCESS_KEY=value +S3_ACCESS_KEY_ID=value PORT=3000 \ No newline at end of file diff --git a/sidecar.env b/sidecar.env index dda8fe4a..7c49bc0b 100644 --- a/sidecar.env +++ b/sidecar.env @@ -1,6 +1,6 @@ # MLFLOW MLFLOW_TRACKING_USERNAME=mlflowadmin -MLFLOW_TRACKING_PASSWORD=mlflowadmin +MLFLOW_TRACKING_PASSWORD=value MLFLOW_HOST_PORT=5000 MLFLOW_CONT_PORT=5000 MLFLOW_HOST=0.0.0.0 diff --git a/src/training/scripts/constants.py b/src/training/scripts/constants.py index 16fd4981..63337a68 100644 --- a/src/training/scripts/constants.py +++ b/src/training/scripts/constants.py @@ -36,5 +36,5 @@ "http://resql:8082/global-classifier/update-training-job-status" ) DATA_MODEL_TRAINING_UPDATE_URL = ( - "http://localhost:8088/global-classifier/datamodels/update-training" + "http://ruuter-public:8086/global-classifier/datamodels/update-training" ) diff --git a/src/training/scripts/train.py b/src/training/scripts/train.py index 62c6498d..1f11337b 100644 --- a/src/training/scripts/train.py +++ b/src/training/scripts/train.py @@ -118,15 +118,6 @@ def convert_model_to_onnx(model_dir: str): ) onnx_config = model_onnx_config(model.config) - # # Create dummy input - # dummy_inputs= tokenizer( - # "This is a dummy input for ONNX export", - # return_tensors="pt", - # 
padding="max_length", - # truncation=True, - # max_length=128, - # ) - output_path = Path(model_dir) / "model.onnx" logger.info(f"Exporting model to ONNX format at: {output_path}") @@ -1001,19 +992,21 @@ def main(): sys.exit(1) logger.info(f"Preprocessed result payload: {preprocessed_result_payload}") - + model_s3_location = s3_model_path - + # Send training results to API for database storage logger.info("📤 Sending training results to data model table...") api_success = update_data_model_training( model_id=args.model_id, training_results=preprocessed_result_payload, - model_s3_location=model_s3_location + model_s3_location=model_s3_location, ) - + if not api_success: - logger.warning("⚠️ Failed to send training results to API, but continuing with job completion...") + logger.warning( + "⚠️ Failed to send training results to API, but continuing with job completion..." + ) # Note: We don't exit here as the training was successful, just the API call failed else: logger.info("✅ Training results successfully sent to database") diff --git a/src/training/scripts/utils.py b/src/training/scripts/utils.py index b6d7bb1e..499bd5c6 100644 --- a/src/training/scripts/utils.py +++ b/src/training/scripts/utils.py @@ -13,6 +13,7 @@ import sys from loguru import logger from typing import Dict, Any +from datetime import datetime from sklearn.metrics import ( accuracy_score, precision_recall_fscore_support, @@ -195,54 +196,59 @@ def update_job_status(job_id: int, status: str) -> bool: logger.error(f"Error updating job status: {str(e)}") return False -def update_data_model_training(model_id: int, training_results: dict, model_s3_location: str) -> bool: + +def update_data_model_training( + model_id: int, training_results: dict, model_s3_location: str +) -> bool: """ Send training results to the API endpoint for database storage. 
- + Args: model_id (int): The model ID training_results (dict): Preprocessed training results payload - + Returns: bool: True if successful, False otherwise """ try: api_url = DATA_MODEL_TRAINING_UPDATE_URL - + + training_results = ensure_json_serializable(training_results) + payload = { "modelId": model_id, "trainingResults": training_results, - "modelS3Location": model_s3_location + "modelS3Location": model_s3_location, } - + logger.info(f"Sending training results to API for model ID: {model_id}") logger.debug(f"API URL: {api_url}") - logger.debug(f"Payload size: {len(json.dumps(payload))} characters") - + logger.debug(f"Payload: {json.dumps(payload, indent=2)}") + # Send POST request response = requests.post( - api_url, - json=payload, - headers={ - "Content-Type": "application/json" - } + api_url, json=payload, headers={"Content-Type": "application/json"} ) - + # Check response if response.status_code == 200: logger.info("✅ Training results successfully sent to API") logger.debug(f"API Response: {response.text}") return True else: - logger.error(f"❌ API request failed with status code: {response.status_code}") + logger.error( + f"❌ API request failed with status code: {response.status_code}" + ) logger.error(f"Response text: {response.text}") return False - + except requests.exceptions.RequestException as e: logger.error(f"❌ Network error when sending training results to API: {str(e)}") return False except Exception as e: - logger.error(f"❌ Unexpected error when sending training results to API: {str(e)}") + logger.error( + f"❌ Unexpected error when sending training results to API: {str(e)}" + ) return False @@ -470,3 +476,30 @@ def measure_inference_time(model, dataloader, device, num_runs=100): times.append(end_time - start_time) return float(np.mean(times)) + + +def ensure_json_serializable(obj): + """ + Recursively ensure all values in the object are JSON serializable. + Converts Python objects to proper JSON-compatible types. 
+ """ + if isinstance(obj, dict): + return {key: ensure_json_serializable(value) for key, value in obj.items()} + elif isinstance(obj, list): + return [ensure_json_serializable(item) for item in obj] + elif isinstance(obj, tuple): + return [ensure_json_serializable(item) for item in obj] + elif isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, datetime): + return obj.isoformat() + elif obj is None or isinstance(obj, (bool, int, float, str)): + return obj + else: + # Convert unknown types to string as fallback + logger.warning(f"Converting unknown type {type(obj)} to string: {obj}") + return str(obj) From 33017a993e3505e54363ad52b6a9277e60178b17 Mon Sep 17 00:00:00 2001 From: Thirunayan22 Date: Wed, 16 Jul 2025 10:18:59 +0530 Subject: [PATCH 093/195] updated docker compose config and data addition architectur --- docker-compose-dev.yml | 337 ++++++++++++++++++ ...r-dataset-pipeline-architecture -v2.drawio | 120 +++++-- 2 files changed, 428 insertions(+), 29 deletions(-) create mode 100644 docker-compose-dev.yml diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml new file mode 100644 index 00000000..b2a52c0b --- /dev/null +++ b/docker-compose-dev.yml @@ -0,0 +1,337 @@ +services: + +# Docker compose without dataset generator services + ruuter-public: + container_name: ruuter-public + image: ruuter + environment: + - application.cors.allowedOrigins=http://localhost:8086,http://localhost:3001,http://localhost:3003,http://localhost:3004,http://localhost:8080,http://localhost:8000,http://localhost:8090 + - application.httpCodesAllowList=200,201,202,204,400,401,403,500 + - application.internalRequests.allowedIPs=127.0.0.1 + - application.logging.displayRequestContent=true + - application.logging.displayResponseContent=true + - application.logging.printStackTrace=true + - application.internalRequests.disabled=true + - server.port=8086 + volumes: + - ./DSL/Ruuter.public:/DSL + - ./constants.ini:/app/constants.ini + ports: + - 8086:8086 + networks: + - bykstack + cpus: "0.5" + mem_limit: "512M" + + ruuter-private: + container_name: ruuter-private + image: ruuter + environment: + - application.cors.allowedOrigins=http://localhost:3001,http://localhost:3003,http://localhost:8088,http://localhost:3002,http://localhost:3004,http://localhost:8000 + - application.httpCodesAllowList=200,201,202,400,401,403,500 + - application.internalRequests.allowedIPs=127.0.0.1 + - application.logging.displayRequestContent=true + - application.logging.displayResponseContent=true + - application.logging.printStackTrace=true + - application.internalRequests.disabled=true + - server.port=8088 + volumes: + - ./DSL/Ruuter.private:/DSL + - ./constants.ini:/app/constants.ini + ports: + - 8088:8088 + networks: + - bykstack + cpus: "0.5" + mem_limit: "512M" + + data-mapper: + container_name: data-mapper + image: data-mapper + environment: + - PORT=3000 + - CONTENT_FOLDER=/data + volumes: + - ./DSL:/data + - ./DSL/DMapper/global-classifier/hbs:/workspace/app/views/global-classifier + - ./DSL/DMapper/global-classifier/lib:/workspace/app/lib + ports: + - 3000:3000 + networks: + - bykstack + + tim: + container_name: tim + image: tim + depends_on: + - tim-postgresql + environment: + - SECURITY_ALLOWLIST_JWT=ruuter-private,ruuter-public,data-mapper,resql,tim,tim-postgresql,chat-widget,authentication-layer,127.0.0.1,::1 + - KEY_PASS=ppjjpp + ports: + - 8085:8085 + networks: + - bykstack + 
extra_hosts: + - "host.docker.internal:host-gateway" + cpus: "0.5" + mem_limit: "512M" + + tim-postgresql: + container_name: tim-postgresql + image: postgres:14.1 + environment: + - POSTGRES_USER=tim + - POSTGRES_PASSWORD=123 + - POSTGRES_DB=tim + # - POSTGRES_HOST_AUTH_METHOD=trust + volumes: + - ./tim-db:/var/lib/postgresql/data + ports: + - 9876:5432 + networks: + - bykstack + + authentication-layer: + container_name: authentication-layer + image: authentication-layer + ports: + - 3004:3004 + networks: + - bykstack + + resql: + container_name: resql + image: resql + depends_on: + - users_db + environment: + - sqlms.datasources.[0].name=byk + - sqlms.datasources.[0].jdbcUrl=jdbc:postgresql://users_db:5432/global-classifier #For LocalDb Use + # sqlms.datasources.[0].jdbcUrl=jdbc:postgresql://171.22.247.13:5435/byk?sslmode=require + - sqlms.datasources.[0].username=postgres + - sqlms.datasources.[0].password=dbadmin + - logging.level.org.springframework.boot=INFO + ports: + - 8082:8082 + volumes: + - ./DSL/Resql:/DSL + networks: + - bykstack + + users_db: + container_name: users_db + image: postgres:14.1 + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=dbadmin + - POSTGRES_DB=global-classifier + ports: + - 5435:5432 + volumes: + - ./global-classifier-db/db_files:/var/lib/postgresql/data + + networks: + - bykstack + restart: always + + init: + image: busybox + command: ["sh", "-c", "chmod -R 777 /shared && chmod -R 777 /app/model_trainer"] + volumes: + - shared-volume:/shared + - ./model_trainer:/app/model_trainer + networks: + - bykstack + + cron-manager: + container_name: cron-manager + image: cron-manager-python:latest + user: "root" + volumes: + - ./DSL/CronManager/DSL:/DSL + - ./DSL/CronManager/script:/app/scripts + - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets + - ./src/s3_dataset_processor:/app/src/s3_dataset_processor + - ./DSL/DatasetGenerator/config:/app/config + - cron_data:/app/data + environment: + - server.port=9010 + ports: + - 9010:8080 + networks: + - bykstack + depends_on: + - init + + # classifier-service: + # container_name: classifier-service + # build: + # context: ./src/classifier-service + # ports: + # - "8090:8090" + # networks: + # - bykstack + # volumes: + # - ./src/classifier-service:/app + # environment: + # - NODE_ENV=development + # restart: always + + # Dataset Generator services + # dataset-gen-ollama: + # image: synthesisai/dataset-generator-ollama:latest + # container_name: dataset-gen-ollama + # ports: + # - "11434:11434" + # environment: + # # - NVIDIA_VISIBLE_DEVICES=all + # # - OLLAMA_USE_GPU=1 + # - OLLAMA_HOST=0.0.0.0 + # volumes: + # - dataset_gen_ollama_models:/root/.ollama + # - ./DSL/DatasetGenerator/ollama-entrypoint.sh:/ollama-entrypoint.sh + # entrypoint: ["bash", "/ollama-entrypoint.sh"] + # # deploy: + # # resources: + # # reservations: + # # devices: + # # - driver: nvidia + # # count: 1 + # # capabilities: [gpu] + # networks: + # - bykstack + + # dataset-gen-service: + # image: synthesisai/dataset-generator:latest + # container_name: dataset-gen-service + # ports: + # - "8000:8000" + # environment: + # - PROVIDER_API_URL=http://dataset-gen-ollama:11434 + # - SERVICE_DEBUG=false + # - MLFLOW_TRACKING_URI=http://dataset-gen-mlflow:5000 + # volumes: + # - ./DSL/DatasetGenerator/config:/app/config + # - ./DSL/DatasetGenerator/templates:/app/templates + # - ./DSL/DatasetGenerator/user_configs:/app/user_configs + # - cron_data:/app/data + # - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets + # - 
./DSL/DatasetGenerator/logs:/app/logs + # depends_on: + # - dataset-gen-ollama + # - dataset-gen-mlflow + # networks: + # - bykstack + + # dataset-gen-mlflow: + # image: synthesisai/dataset-generator-mlflow:latest + # container_name: dataset-gen-mlflow + # ports: + # - "5000:5000" + # env_file: + # - sidecar.env + # environment: + # - MLFLOW_TRACKING_USERNAME=${MLFLOW_TRACKING_USERNAME} + # - MLFLOW_TRACKING_PASSWORD=${MLFLOW_TRACKING_PASSWORD} + # - MLFLOW_HOST=${MLFLOW_HOST} + # - MLFLOW_PORT=${MLFLOW_PORT} + # - MLFLOW_BACKEND_STORE_URI=${MLFLOW_BACKEND_STORE_URI} + # - MLFLOW_DEFAULT_ARTIFACT_ROOT=${MLFLOW_DEFAULT_ARTIFACT_ROOT} + # - MLFLOW_FLASK_SERVER_SECRET_KEY=${MLFLOW_FLASK_SERVER_SECRET_KEY} + # volumes: + # - ./DSL/DatasetGenerator/mlflow_data:/mlflow/mlflow_data + # - ./DSL/DatasetGenerator/mlflow_artifacts:/mlflow/mlflow_artifacts + # networks: + # - bykstack + + + minio: + image: minio/minio:latest + container_name: minio + env_file: + - sidecar.env + environment: + - MINIO_ROOT_USER=${MINIO_ROOT_USER} + - MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD} + - MINIO_BROWSER_REDIRECT_URL=${MINIO_BROWSER_REDIRECT_URL} + command: server /data --console-address ":9001" + volumes: + - minio_data:/data + ports: + - "9000:9000" # API port + - "9001:9001" # Console port + networks: + - bykstack + + gc-s3-ferry: + image: s3-ferry:latest + container_name: gc-s3-ferry + volumes: + - ./DSL/DatasetGenerator/output_datasets:/app/output_datasets + env_file: + - config.env + ports: + - "3006:3000" + user: "root" + networks: + - bykstack + + dataset-file-handler: + container_name: dataset-file-handler + build: ./src/dataset_file_handler + ports: + - "8001:8001" + volumes: + - cron_data:/app/data # Same volume as cron-manager + environment: + - PORT=8001 + networks: + - bykstack + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8001/health"] + interval: 30s + timeout: 10s + retries: 3 + + gui: + container_name: gui + environment: + - NODE_ENV=local + - REACT_APP_RUUTER_API_URL=http://localhost:8086 + - REACT_APP_RUUTER_PRIVATE_API_URL=http://localhost:8088 + - REACT_APP_EXTERNAL_API_URL=http://localhost:8000 + - REACT_APP_CUSTOMER_SERVICE_LOGIN=http://localhost:3004/et/dev-auth + - REACT_APP_NOTIFICATION_NODE_URL=http://localhost:4040 + - REACT_APP_CSP=upgrade-insecure-requests; default-src 'self'; font-src 'self' data:; img-src 'self' data:; script-src 'self' 'unsafe-eval' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; object-src 'none'; connect-src 'self' http://localhost:8086 http://localhost:8088 http://localhost:8085 http://localhost:4040 http://localhost:3001 http://localhost:8000; + - DEBUG_ENABLED=true + - CHOKIDAR_USEPOLLING=true + - PORT=3001 + - REACT_APP_SERVICE_ID=conversations,settings,monitoring + - REACT_APP_ENABLE_HIDDEN_FEATURES=TRUE + + build: + context: ./GUI + dockerfile: Dockerfile.dev + ports: + - 3003:3001 + volumes: + - /app/node_modules + - ./GUI:/app + networks: + - bykstack + cpus: "0.5" + mem_limit: "1G" + +volumes: + shared-volume: + opensearch-data: + dataset_gen_ollama_models: + minio_data: + cron_data: + +networks: + bykstack: + name: bykstack + driver: bridge + \ No newline at end of file diff --git a/docs/achitecture/global-classifier-dataset-pipeline-architecture -v2.drawio b/docs/achitecture/global-classifier-dataset-pipeline-architecture -v2.drawio index 5736fa02..ce23baeb 100644 --- a/docs/achitecture/global-classifier-dataset-pipeline-architecture -v2.drawio +++ b/docs/achitecture/global-classifier-dataset-pipeline-architecture -v2.drawio @@ 
-1,6 +1,6 @@ [draw.io XML diff omitted: the diagram markup did not survive text extraction] @@ -704,64 +704,126 @@ [draw.io XML diff omitted: the diagram markup did not survive text extraction] From ee4f5b32f6e9ab6643c0f83a91119c96b26605e7 Mon Sep 17 00:00:00 2001 From: Thirunayan22 Date: Wed, 16 Jul 2025 12:21:53 +0530 Subject: [PATCH 094/195] updated docker-compose-dev to have defined volume names, added triton inference server configuration, minio integration for triton model repository --- docker-compose-dev.yml | 6 +++ docker-compose-infer-cpu.yml | 52 ++++++++++++++++++++++++++ docker-compose-infer-gpu.yml | 72 ++++++++++++++++++++++++++++++++++++ sidecar.env | 2 +- 4 files changed, 131 insertions(+), 1 deletion(-) create mode 100644 docker-compose-infer-cpu.yml create mode 100644 docker-compose-infer-gpu.yml diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index b2a52c0b..f365b18f 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -1,3 +1,4 @@ + services: # Docker compose without dataset generator services @@ -325,10 +326,15 @@ services: volumes: shared-volume: + name: shared-volume opensearch-data: + name: opensearch-data dataset_gen_ollama_models: + name: dataset_gen_ollama_models minio_data: + name: minio_data cron_data: + name: cron_data networks: bykstack: diff --git a/docker-compose-infer-cpu.yml b/docker-compose-infer-cpu.yml new file mode 100644 index 00000000..9ebd69d2 --- /dev/null +++ b/docker-compose-infer-cpu.yml @@ -0,0 +1,52 @@ +services: + + triton-production-server: + image: nvcr.io/nvidia/tritonserver:25.06-py3 + container_name: triton-production-server + command: tritonserver --model-repository=s3://minio:9000/production-model/model-repository --model-control-mode=explicit --log-verbose=1 + ports: + - "6000:8000" + - "6001:8001" + - "6002:8002" + volumes: + - cron_data:/data_models + + environment: + - AWS_ACCESS_KEY_ID=minioadmin + - AWS_SECRET_ACCESS_KEY=minioadmin + - AWS_ENDPOINT_URL=http://minio:9000 + - AWS_REGION=eu-central-1 + networks: + - bykstack + + triton-test-server: + image: nvcr.io/nvidia/tritonserver:25.06-py3 + container_name: triton-test-server + command: tritonserver --model-repository=s3://minio:9000/test-models/model-repository --model-control-mode=explicit --log-verbose=1 + ports: + - "4000:8000" + - "4001:8001" + - "4002:8002" + volumes: + - cron_data:/data_models + environment: + - AWS_ACCESS_KEY_ID=minioadmin + - AWS_SECRET_ACCESS_KEY=minioadmin + - AWS_ENDPOINT_URL=http://minio:9000 + - AWS_REGION=eu-central-1 + networks: + - bykstack + +volumes: + shared-volume: + external: true + name: shared-volume + cron_data: + external: true + name: cron_data + +networks: + bykstack: + name: bykstack + external: true + \ No newline at end of file diff --git a/docker-compose-infer-gpu.yml b/docker-compose-infer-gpu.yml new file mode 100644 index 00000000..cc8b6065 --- /dev/null +++ b/docker-compose-infer-gpu.yml @@ -0,0 +1,72 @@ +services: + + triton-production-server: + image: nvcr.io/nvidia/tritonserver:25.06-py3 + container_name: triton-production-server + command: tritonserver --model-repository=s3://minio:9000/production-model/model-repository --model-control-mode=explicit --log-verbose=1 + ports: + - "6000:8000" + - "6001:8001" + - "6002:8002" + + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: [gpu] + + volumes: + - cron_data:/data_models + + environment: + - AWS_ACCESS_KEY_ID=minioadmin 
+ - AWS_SECRET_ACCESS_KEY=minioadmin + - AWS_ENDPOINT_URL=http://minio:9000 + - AWS_REGION=eu-central-1 + networks: + - bykstack + + + triton-test-server: + image: nvcr.io/nvidia/tritonserver:25.06-py3 + container_name: triton-test-server + command: tritonserver --model-repository=s3://minio:9000/test-models/model-repository --model-control-mode=explicit --log-verbose=1 + ports: + - "4000:8000" + - "4001:8001" + - "4002:8002" + + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: [gpu] + + volumes: + - cron_data:/data_models + environment: + - AWS_ACCESS_KEY_ID=minioadmin + - AWS_SECRET_ACCESS_KEY=minioadmin + - AWS_ENDPOINT_URL=http://minio:9000 + - AWS_REGION=eu-central-1 + networks: + - bykstack + + +volumes: + shared-volume: + external: true + name: shared-volume + cron_data: + external: true + name: cron_data + +networks: + bykstack: + name: bykstack + external: true + \ No newline at end of file diff --git a/sidecar.env b/sidecar.env index 7c49bc0b..95b34ce4 100644 --- a/sidecar.env +++ b/sidecar.env @@ -11,5 +11,5 @@ MLFLOW_HOST_CONFIG_PATH=./mlflow/config MLFLOW_CONT_CONFIG_PATH=/mlflow/config MLFLOW_FLASK_SERVER_SECRET_KEY=byk-mlflow-secret MINIO_ROOT_USER=minioadmin -MINIO_ROOT_PASSWORD=value +MINIO_ROOT_PASSWORD=minioadmin MINIO_BROWSER_REDIRECT_URL=http://localhost:9001 \ No newline at end of file From dd0d61d66e7c44cc81a38b1c9cf9f3717af2aa72 Mon Sep 17 00:00:00 2001 From: Thirunayan22 Date: Wed, 16 Jul 2025 12:31:28 +0530 Subject: [PATCH 095/195] added ruff check --- src/inference/prod/Dockerfile | 0 src/inference/prod/prod_inference_api.py | 24 -------------------- src/inference/testing/test_inference_api.py | 0 src/tests/inference/prod_deployment_tests.py | 5 +--- 4 files changed, 1 insertion(+), 28 deletions(-) delete mode 100644 src/inference/prod/Dockerfile delete mode 100644 src/inference/prod/prod_inference_api.py delete mode 100644 src/inference/testing/test_inference_api.py diff --git a/src/inference/prod/Dockerfile b/src/inference/prod/Dockerfile deleted file mode 100644 index e69de29b..00000000 diff --git a/src/inference/prod/prod_inference_api.py b/src/inference/prod/prod_inference_api.py deleted file mode 100644 index 87f336f6..00000000 --- a/src/inference/prod/prod_inference_api.py +++ /dev/null @@ -1,24 +0,0 @@ -from model-training.trainingpipeline import device -import os -from typing import Optional, Dict, List, Any -import litserve as ls - -#TODO - Add endpoint to download model file from S3 ferry -#TODO - Add endpoint to hot swap model file from S3 ferry -#TODO - Add ruuter API calls to update model handler endpoint -#TODO - Add endpoint to remove serving of model and replace with default empty model - - -class ProdInferenceAPI(ls.LitAPI): - """ - Production inference API for serving models. - """ - - def setup(self, device="cpu") -> None: - """ - Setup the inference API with the specified device. 
- """ - - self.model = default_model() - `` - \ No newline at end of file diff --git a/src/inference/testing/test_inference_api.py b/src/inference/testing/test_inference_api.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/tests/inference/prod_deployment_tests.py b/src/tests/inference/prod_deployment_tests.py index 39c782d4..fb2088d5 100644 --- a/src/tests/inference/prod_deployment_tests.py +++ b/src/tests/inference/prod_deployment_tests.py @@ -1,6 +1,3 @@ -import pytest - - def test_load_model(): """Test the load_model function.""" pass @@ -18,4 +15,4 @@ def test_delete_model(): def test_model_inference(): """Test model inference functionality.""" - pass \ No newline at end of file + pass From 2fa95dfb295295efacf601165be1da5fe5c2de5a Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Wed, 16 Jul 2025 15:46:30 +0530 Subject: [PATCH 096/195] changed model s3 path --- docker-compose.yml | 20 ++++++++++---------- src/training/scripts/s3_utility_handler.py | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 8b60d8c8..9a767951 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -196,20 +196,20 @@ services: ports: - "11434:11434" environment: - # - NVIDIA_VISIBLE_DEVICES=all - # - OLLAMA_USE_GPU=1 + - NVIDIA_VISIBLE_DEVICES=all + - OLLAMA_USE_GPU=1 - OLLAMA_HOST=0.0.0.0 volumes: - dataset_gen_ollama_models:/root/.ollama - ./DSL/DatasetGenerator/ollama-entrypoint.sh:/ollama-entrypoint.sh entrypoint: ["bash", "/ollama-entrypoint.sh"] - # deploy: - # resources: - # reservations: - # devices: - # - driver: nvidia - # count: 1 - # capabilities: [gpu] + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] networks: - bykstack @@ -241,7 +241,7 @@ services: dockerfile: Dockerfile container_name: mlflow ports: - - "5000:5000" + - "5001:5000" env_file: - sidecar.env environment: diff --git a/src/training/scripts/s3_utility_handler.py b/src/training/scripts/s3_utility_handler.py index 3e8ac982..a34f35ba 100644 --- a/src/training/scripts/s3_utility_handler.py +++ b/src/training/scripts/s3_utility_handler.py @@ -137,14 +137,14 @@ def upload_trained_model( # Create zip file name with timestamp timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") model_id = str(model_id) - zip_filename = f"{model_type}_dataset_{model_id}_model_{timestamp}.zip" + zip_filename = f"{model_id}.zip" zip_path = os.path.join(self.models_dir, zip_filename) # Zip the model directory self._zip_directory(model_dir, zip_path) # Upload to S3 - s3_dest_path = f"models/{model_id}/{zip_filename}" + s3_dest_path = f"models/undeployed/{zip_filename}" logger.info(f"Uploading zipped model to S3: {s3_dest_path}") From cee3611c7ca06394ecaffb3878f274006242ed62 Mon Sep 17 00:00:00 2001 From: nuwangeek Date: Wed, 16 Jul 2025 15:56:57 +0530 Subject: [PATCH 097/195] fixed ruff lint issues --- src/training/scripts/s3_utility_handler.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/training/scripts/s3_utility_handler.py b/src/training/scripts/s3_utility_handler.py index a34f35ba..bc6db371 100644 --- a/src/training/scripts/s3_utility_handler.py +++ b/src/training/scripts/s3_utility_handler.py @@ -2,7 +2,6 @@ import sys import zipfile from pathlib import Path -from datetime import datetime from scripts.s3_ferry_service import S3Ferry @@ -134,8 +133,7 @@ def upload_trained_model( try: logger.info(f"Preparing to upload model from: {model_dir}") - # Create zip file name with timestamp - timestamp 
= datetime.now().strftime("%Y%m%d_%H%M%S") + # Create zip file name model_id = str(model_id) zip_filename = f"{model_id}.zip" zip_path = os.path.join(self.models_dir, zip_filename) From feded7b79a5475cad910834282ffe6d0d7f0a0a3 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 17 Jul 2025 21:12:52 +0530 Subject: [PATCH 098/195] feat: Add training results display and update data model types for enhanced performance tracking --- .../molecules/DataModelCard/index.tsx | 66 +-------- .../molecules/DataModelForm/index.tsx | 139 ++++++++++-------- .../TrainingResults/TrainingResults.scss | 55 +++++++ .../molecules/TrainingResults/index.tsx | 79 ++++++++++ .../pages/DataModels/ConfigureDataModel.tsx | 3 + GUI/src/pages/DataModels/index.tsx | 8 +- GUI/src/types/dataModels.ts | 1 + 7 files changed, 227 insertions(+), 124 deletions(-) create mode 100644 GUI/src/components/molecules/TrainingResults/TrainingResults.scss create mode 100644 GUI/src/components/molecules/TrainingResults/index.tsx diff --git a/GUI/src/components/molecules/DataModelCard/index.tsx b/GUI/src/components/molecules/DataModelCard/index.tsx index 7b61357b..c0c54cd9 100644 --- a/GUI/src/components/molecules/DataModelCard/index.tsx +++ b/GUI/src/components/molecules/DataModelCard/index.tsx @@ -9,6 +9,7 @@ import { useTranslation } from 'react-i18next'; import { TrainingResults } from 'types/dataModels'; import { formatDate } from 'utils/commonUtilts'; import { useNavigate } from 'react-router-dom'; +import ModelResults from '../TrainingResults'; type DataModelCardProps = { modelId: number | string; @@ -20,7 +21,7 @@ type DataModelCardProps = { trainingStatus?: string; modelStatus?: string; deploymentEnv?: string; - results?: string | null; + results?: any | null; }; const DataModelCard: FC> = ({ @@ -38,13 +39,13 @@ const DataModelCard: FC> = ({ }) => { const { open, close } = useDialog(); const { t } = useTranslation(); - const resultsJsonData: TrainingResults = JSON.parse(results ?? '{}'); + const trainingResults = results?.value && JSON.parse(results?.value) || null; const navigate = useNavigate(); const configureDataModel = () => { navigate(`/configure-datamodel?datamodelId=${modelId}`); +} -}; const renderTrainingStatus = (status: string | undefined) => { if (status === TrainingStatus.RETRAINING_NEEDED) { return ( @@ -136,68 +137,15 @@ const configureDataModel = () => { ), size: 'large', content: ( -
    -
    - {t('dataModels.trainingResults.bestPerformingModel') ?? - ''} - - -
    - -
    - {' '} - {t('dataModels.trainingResults.classes') ?? ''} -
    -
    - {t('dataModels.trainingResults.accuracy') ?? ''} -
    -
    - {t('dataModels.trainingResults.f1Score') ?? ''} -
    -
    - } - > +
    {results ? ( -
    -
    - {resultsJsonData?.trainingResults?.classes?.map( - (c: string, index: number) => { - return
    {c}
    ; - } - )} -
    -
    - {resultsJsonData?.trainingResults?.accuracy?.map( - (c: string, index: number) => { - return ( -
    - {parseFloat(c)?.toFixed(2)} -
    - ); - } - )} -
    -
    - {resultsJsonData?.trainingResults?.f1_score?.map( - (c: string, index: number) => { - return ( -
    - {parseFloat(c)?.toFixed(2)} -
    - ); - } - )} -
    -
    + ) : (
    {t('dataModels.trainingResults.noResults') ?? ''}
    )} - -
    +
    ), }); }} diff --git a/GUI/src/components/molecules/DataModelForm/index.tsx b/GUI/src/components/molecules/DataModelForm/index.tsx index 9343003a..ff8004ca 100644 --- a/GUI/src/components/molecules/DataModelForm/index.tsx +++ b/GUI/src/components/molecules/DataModelForm/index.tsx @@ -1,4 +1,4 @@ -import { FC } from 'react'; +import { FC, useState } from 'react'; import { useTranslation } from 'react-i18next'; import { FormCheckboxes, @@ -14,6 +14,7 @@ import { DataModel } from 'types/dataModels'; import { dataModelsQueryKeys, datasetQueryKeys } from 'utils/queryKeys'; import { getDeploymentEnvironments } from 'services/datamodels'; import { getAllDatasetVersions } from 'services/datasets'; +import ModelResults from '../TrainingResults'; type DataModelFormType = { dataModel: any; @@ -29,7 +30,8 @@ const DataModelForm: FC = ({ type, }) => { const { t } = useTranslation(); - const { data: deploymentEnvironmentsData } = useQuery({ + const [showTrainingResults, setShowTrainingResults] = useState(true); + const { data: deploymentEnvironmentsData } = useQuery({ queryKey: datasetQueryKeys.DATASET_VERSIONS(), queryFn: () => getDeploymentEnvironments(), }); @@ -37,8 +39,10 @@ const DataModelForm: FC = ({ const { data: datasetVersions } = useQuery({ queryKey: dataModelsQueryKeys.DATA_MODEL_DEPLOYMENT_ENVIRONMENTS(), queryFn: () => getAllDatasetVersions(), - }); - + }); + + const trainingResults = dataModel?.trainingResults?.value && JSON.parse(dataModel?.trainingResults?.value) || null; + return (
    {type === 'create' ? ( @@ -65,67 +69,80 @@ const DataModelForm: FC = ({ )} {((type === 'configure') || type === 'create') - ? ( -
    -
    - {t('dataModels.dataModelForm.datasetGroup')}{' '} -
    -
    - { - handleChange('datasetId', selection?.value); - }} - value={dataModel?.datasetId === null && t('dataModels.dataModelForm.errors.datasetVersionNotExist')} - defaultValue={dataModel?.datasetId ? dataModel?.datasetId : t('dataModels.dataModelForm.errors.datasetVersionNotExist')} - error={errors?.datasetId} - /> -
    - {(type === 'configure') && !dataModel.datasetId && {t('dataModels.dataModelForm.errors.datasetVersionNotExist')}} + ? ( +
    +
    + {t('dataModels.dataModelForm.datasetGroup')}{' '} +
    +
    + { + handleChange('datasetId', selection?.value); + }} + value={dataModel?.datasetId === null && t('dataModels.dataModelForm.errors.datasetVersionNotExist')} + defaultValue={dataModel?.datasetId ? dataModel?.datasetId : t('dataModels.dataModelForm.errors.datasetVersionNotExist')} + error={errors?.datasetId} + /> +
    + {(type === 'configure') && !dataModel.datasetId && {t('dataModels.dataModelForm.errors.datasetVersionNotExist')}} +
    -
    -
    - {t('dataModels.dataModelForm.baseModels')}{' '} -
    -
    - - handleChange('baseModels', values.baseModels) - } - error={errors?.baseModels} - selectedValues={dataModel?.baseModels} - /> -
    +
    + {t('dataModels.dataModelForm.baseModels')}{' '} +
    -
    - {t('dataModels.dataModelForm.deploymentPlatform')}{' '} -
    -
    - handleChange('deploymentEnvironment', value)} - error={errors?.deploymentEnvironment} - selectedValue={dataModel?.deploymentEnvironment} - /> +
    + + handleChange('baseModels', values.baseModels) + } + error={errors?.baseModels} + selectedValues={dataModel?.baseModels} + /> + {type === 'configure' && trainingResults && ( + setShowTrainingResults((prev) => !prev)} + > + {showTrainingResults ? "Hide Training Results" : "View Training Results"} + + )} +
    + {showTrainingResults && } + +
    + {t('dataModels.dataModelForm.deploymentPlatform')}{' '} +
    +
    + handleChange('deploymentEnvironment', value)} + error={errors?.deploymentEnvironment} + selectedValue={dataModel?.deploymentEnvironment} + /> +
    -
    - ) : ( - - )} + ) : ( + + )}
    ); }; diff --git a/GUI/src/components/molecules/TrainingResults/TrainingResults.scss b/GUI/src/components/molecules/TrainingResults/TrainingResults.scss new file mode 100644 index 00000000..0537cc5b --- /dev/null +++ b/GUI/src/components/molecules/TrainingResults/TrainingResults.scss @@ -0,0 +1,55 @@ +.results-wrapper { + padding: 20px; + background-color: #fff; +} + +.best-model-header { + font-size: 1rem; + font-weight: 500; + margin-bottom: 1rem; + color:#4D4F5D +} + +.section-title { + font-size: .9rem; + margin-bottom: 20px; + color: #4D4F5D; +} + +.model-section { + margin-bottom: 2rem; +} + +.model-name { + font-size: .9rem; + margin-bottom: 1rem; + margin-top: 1rem; + color:#4D4F5D +} + +.metrics-grid { + display: flex; + flex-wrap: wrap; + gap: 16px; +} + +.metric-card { + background-color: #f7f7f7; + border: 1px solid #ddd; + border-radius: 6px; + padding: 12px 16px; + width: 200px; + box-shadow: 0 2px 4px rgba(0,0,0,0.05); +} + +.metric-row { + display: flex; + justify-content: space-between; + margin-bottom: 6px; + font-size: 14px; +} + +.metric-label { + font-weight: 600; + color: #444; +} diff --git a/GUI/src/components/molecules/TrainingResults/index.tsx b/GUI/src/components/molecules/TrainingResults/index.tsx new file mode 100644 index 00000000..e0868ebb --- /dev/null +++ b/GUI/src/components/molecules/TrainingResults/index.tsx @@ -0,0 +1,79 @@ +import './TrainingResults.scss'; + +export type ClassMetrics = { + f1: number; + recall: number; + accuracy: number; + precision: number; +}; + +export type ModelPerformance = { + model_type: string; + class_metrics: { + [className: string]: ClassMetrics; + }; +}; + +export type ModelResultsProps = { + models: ModelPerformance[]; +}; + +const ModelResults: React.FC = ({ models }) => { + return ( +
    +

    + Best Performing Model - {models?.[0]?.model_type || "N/A"} +

    +

    Training Results

    + + {models?.map((model, idx) => ( +
    +
    {model.model_type}
    + +
    +
    +
    Classes
    +
    +
    F1
    +
    Recall
    +
    Accuracy
    +
    Precision
    +
    + +
    +
    + {Object.entries(model?.class_metrics).map(([className, metrics]) => ( +
    +
    +
    {className}
    +
    +
    {metrics.f1}
    +
    {metrics.recall}
    +
    {metrics.accuracy}
    +
    {metrics.precision}
    +
    +
    +
    + ))} +
    +
    + ))} +
    + ); +}; + +export default ModelResults; \ No newline at end of file diff --git a/GUI/src/pages/DataModels/ConfigureDataModel.tsx b/GUI/src/pages/DataModels/ConfigureDataModel.tsx index 65e24ff4..75998bce 100644 --- a/GUI/src/pages/DataModels/ConfigureDataModel.tsx +++ b/GUI/src/pages/DataModels/ConfigureDataModel.tsx @@ -56,6 +56,7 @@ const ConfigureDataModel: FC = () => { baseModels: modelMetadata ? JSON.parse(modelMetadata?.baseModels.value) : [], deploymentEnvironment: modelMetadata?.deploymentEnv, version: `V${modelMetadata?.major}.${modelMetadata?.minor}`, + trainingResults: modelMetadata?.trainingResults, }); useEffect(() => { @@ -74,6 +75,8 @@ const ConfigureDataModel: FC = () => { baseModels: modelMetadata ? JSON.parse(modelMetadata?.baseModels.value) : [], deploymentEnvironment: modelMetadata?.deploymentEnv, version: `V${modelMetadata?.major}.${modelMetadata?.minor}`, + trainingResults: modelMetadata?.trainingResults, + }); }, [modelMetadata]); diff --git a/GUI/src/pages/DataModels/index.tsx b/GUI/src/pages/DataModels/index.tsx index bcb9037b..249b8e5f 100644 --- a/GUI/src/pages/DataModels/index.tsx +++ b/GUI/src/pages/DataModels/index.tsx @@ -4,7 +4,7 @@ import { Button, FormSelect } from 'components'; import Pagination from 'components/molecules/Pagination'; import { useQuery } from '@tanstack/react-query'; import { useNavigate } from 'react-router-dom'; -import { formattedArray, parseVersionString } from 'utils/commonUtilts'; +import { formattedArray } from 'utils/commonUtilts'; import DataModelCard from 'components/molecules/DataModelCard'; import CircularSpinner from 'components/molecules/CircularSpinner/CircularSpinner'; import { ButtonAppearanceTypes } from 'enums/commonEnums'; @@ -123,7 +123,7 @@ const DataModels: FC = () => { label="" name="" placeholder={t('dataModels.filters.maturity') ?? ''} - options={formattedArray(deploymentEnvironmentsData[0]?.deploymentEnvironments) ?? []} + options={formattedArray(deploymentEnvironmentsData?.[0]?.deploymentEnvironments) ?? []} onSelectionChange={(selection) => handleFilterChange('deploymentEnvironment', selection?.value) } @@ -189,7 +189,7 @@ const DataModels: FC = () => { trainingStatus={prodDataModel.trainingStatus} modelStatus={prodDataModel?.modelStatus} deploymentEnv={prodDataModel?.deploymentEnv} - // results={model?.trainingResults ?? null} + results={prodDataModel?.trainingResults ?? null} />
    } @@ -211,7 +211,7 @@ const DataModels: FC = () => { trainingStatus={model.trainingStatus} modelStatus={model?.modelStatus} deploymentEnv={model?.deploymentEnv} - // results={model?.trainingResults ?? null} + // results={model?.trainingResults ?? null} /> ); diff --git a/GUI/src/types/dataModels.ts b/GUI/src/types/dataModels.ts index 8e39961f..65c2fd4c 100644 --- a/GUI/src/types/dataModels.ts +++ b/GUI/src/types/dataModels.ts @@ -6,6 +6,7 @@ export type DataModel = { baseModels: string[]; deploymentEnvironment: string; version?: string; + trainingResults?: Object | null; }; export type TrainingProgressData = { From 2f2337861cd68bc184039549affed37c2c6dcf7f Mon Sep 17 00:00:00 2001 From: Thirunayan22 Date: Thu, 17 Jul 2025 21:23:56 +0530 Subject: [PATCH 099/195] added minio files to be mounted in directory instead of docker volume and added minio_data directory to .gitignore --- .gitignore | 1 + docker-compose-dev.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 8b721232..5f1f2c0f 100644 --- a/.gitignore +++ b/.gitignore @@ -52,3 +52,4 @@ dataset_artifacts/ venv /signed.py /minio_presigned_urls.txt +minio_data/ diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index f365b18f..ccf86e38 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -258,7 +258,7 @@ services: - MINIO_BROWSER_REDIRECT_URL=${MINIO_BROWSER_REDIRECT_URL} command: server /data --console-address ":9001" volumes: - - minio_data:/data + - ./minio_data:/data ports: - "9000:9000" # API port - "9001:9001" # Console port From 4a1fe8096543dd39395d9c5c588cd1aa34160955 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 17 Jul 2025 21:38:30 +0530 Subject: [PATCH 100/195] Add TestModel component and associated styles --- GUI/src/App.tsx | 3 + GUI/src/pages/TestModel/TestModel.scss | 17 ++++++ GUI/src/pages/TestModel/index.tsx | 85 ++++++++++++++++++++++++++ 3 files changed, 105 insertions(+) create mode 100644 GUI/src/pages/TestModel/TestModel.scss create mode 100644 GUI/src/pages/TestModel/index.tsx diff --git a/GUI/src/App.tsx b/GUI/src/App.tsx index 921d63f6..6f7cfdf7 100644 --- a/GUI/src/App.tsx +++ b/GUI/src/App.tsx @@ -15,6 +15,7 @@ import ViewDataset from 'pages/ViewDataset'; import DataModels from 'pages/DataModels'; import CreateDataModel from 'pages/DataModels/CreateDataModel'; import ConfigureDataModel from 'pages/DataModels/ConfigureDataModel'; +import TestModel from 'pages/TestModel'; const App: FC = () => { const navigate = useNavigate(); @@ -70,6 +71,8 @@ const App: FC = () => { } /> } /> } /> + } /> + diff --git a/GUI/src/pages/TestModel/TestModel.scss b/GUI/src/pages/TestModel/TestModel.scss new file mode 100644 index 00000000..b49ccc20 --- /dev/null +++ b/GUI/src/pages/TestModel/TestModel.scss @@ -0,0 +1,17 @@ +.testModalFormTextArea { + margin-top: 30px; +} + +.testModalClassifyButton { + text-align: right; + margin-top: 20px; +} + +.testModalList { + list-style: disc; + margin-left: 30px; +} + +.mt-20 { + margin-top: 20px; +} diff --git a/GUI/src/pages/TestModel/index.tsx b/GUI/src/pages/TestModel/index.tsx new file mode 100644 index 00000000..acc456bd --- /dev/null +++ b/GUI/src/pages/TestModel/index.tsx @@ -0,0 +1,85 @@ +import { useMutation, useQuery } from '@tanstack/react-query'; +import { Button, FormSelect, FormTextarea } from 'components'; +import CircularSpinner from 'components/molecules/CircularSpinner/CircularSpinner'; +import { FC, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import apiDev 
from 'services/api-dev'; +import { + ClassifyTestModalPayloadType, + TestModalDropdownSelectionType, + TestModelType, +} from 'types/testModelTypes'; + +import './TestModel.scss'; + +const TestModel: FC = () => { + const { t } = useTranslation(); + const isLoading = false; + const [modelOptions, setModelOptions] = useState< + TestModalDropdownSelectionType[] + >([]); + + const [testModel, setTestModel] = useState({ + modelId: null, + text: '', + }); + + const handleChange = (key: string, value: string | number) => { + setTestModel((prev) => ({ + ...prev, + [key]: value, + })); + }; + + return ( +
    + {isLoading ? ( + + ) : ( +
    +
    +
    {t('testModels.title')}
    +
    +
    +

    {t('testModels.selectionLabel')}

    +
    + { + handleChange('modelId', selection?.value as string); + }} + /> + +
    + +
    + +
    +

    {t('testModels.classifyTextLabel')}

    + handleChange('text', e.target.value)} + showMaxLength={true} + /> +
    +
    + +
    +
    + )} +
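The stub above only renders the model selector and text area; the Classify button gets wired to a mutation in a later patch of this series. A hedged sketch of the request that mutation ultimately issues (the path matches `testModelsEndpoints.CLASSIFY_TEST_MODELS()` after the rename in patch 183; error handling here is illustrative):

```ts
// Sketch of the classify call; the { modelId, text } payload mirrors the
// component state above. Ruuter responses wrap their payload under `response`.
export async function classifyText(modelId: string, text: string): Promise<unknown> {
  const res = await fetch('/global-classifier/testmodel/classify', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ modelId, text }),
  });
  if (!res.ok) {
    throw new Error(`Classification request failed with status ${res.status}`);
  }
  const body = await res.json();
  return body?.response;
}
```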
    + ); +}; + +export default TestModel; \ No newline at end of file From c9cbe9074eca3b7e9bec068ffedd3669ec449dd1 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 17 Jul 2025 21:40:15 +0530 Subject: [PATCH 101/195] feat: Refactor TrainingResults component for improved layout and styling --- .../TrainingResults/TrainingResults.scss | 45 +++++++++++++++++++ .../molecules/TrainingResults/index.tsx | 39 +++++----------- 2 files changed, 56 insertions(+), 28 deletions(-) diff --git a/GUI/src/components/molecules/TrainingResults/TrainingResults.scss b/GUI/src/components/molecules/TrainingResults/TrainingResults.scss index 0537cc5b..753e6313 100644 --- a/GUI/src/components/molecules/TrainingResults/TrainingResults.scss +++ b/GUI/src/components/molecules/TrainingResults/TrainingResults.scss @@ -53,3 +53,48 @@ font-weight: 600; color: #444; } + +.header-row { + display: flex; + justify-content: space-between; + margin-bottom: 1rem; +} + +.header-classes { + width: 30%; + font-weight: 600; + color: #444; +} + +.header-metrics { + display: grid; + grid-template-columns: repeat(4, 7rem); + gap: 1rem; + width: 70%; + font-weight: 600; + color: #444; +} + +.hr-divider { + border: none; + border-top: 1px solid #D1D1D1; + margin: 0; +} + +.metric-row { + display: flex; + justify-content: space-between; + width: auto; + padding: .5rem 0rem; +} + +.metric-class { + width: 30%; +} + +.metric-values { + display: grid; + grid-template-columns: repeat(4, 7rem); + gap: 1rem; + width: 70%; +} \ No newline at end of file diff --git a/GUI/src/components/molecules/TrainingResults/index.tsx b/GUI/src/components/molecules/TrainingResults/index.tsx index e0868ebb..ceac09b0 100644 --- a/GUI/src/components/molecules/TrainingResults/index.tsx +++ b/GUI/src/components/molecules/TrainingResults/index.tsx @@ -31,41 +31,24 @@ const ModelResults: React.FC = ({ models }) => {
    {model.model_type}
    -
    -
    Classes
    -
    +
    +
    Classes
    +
    F1
    Recall
    Accuracy
    Precision
    -
    -
    +
    {Object.entries(model?.class_metrics).map(([className, metrics]) => ( -
    -
    -
    {className}
    -
    -
    {metrics.f1}
    -
    {metrics.recall}
    -
    {metrics.accuracy}
    -
    {metrics.precision}
    -
    +
    +
    {className}
    +
    +
    {metrics.f1}
    +
    {metrics.recall}
    +
    {metrics.accuracy}
    +
    {metrics.precision}
    ))} From f7df7c588685e58b433f24b990a4e4399ad38af7 Mon Sep 17 00:00:00 2001 From: Thirunayan22 Date: Fri, 18 Jul 2025 00:58:38 +0530 Subject: [PATCH 102/195] updated constants.ini to contain triton server endpoints; added copilot instructions; add mock endpoints for model swapping --- .github/copilot-instructions.md | 125 +++++++++++++++ .../POST/inference/deploy.yml | 144 ++++++++++++++++++ constants.ini | 5 +- 3 files changed, 273 insertions(+), 1 deletion(-) create mode 100644 .github/copilot-instructions.md create mode 100644 DSL/Ruuter.private/global-classifier/POST/inference/deploy.yml diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 00000000..e084df9c --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,125 @@ +# Global Classifier AI Coding Instructions + +## Architecture Overview + +Global Classifier is a machine learning platform built on the **BYK Stack** - a microservices architecture with specialized DSL-based components: + +- **Ruuter**: API gateway handling REST endpoints via YAML DSL configurations (`DSL/Ruuter.public/`, `DSL/Ruuter.private/`) +- **Resql**: Database abstraction layer using SQL files as endpoints (`DSL/Resql/global-classifier/`) +- **Data Mapper**: Template engine for dynamic content generation (`DSL/DMapper/`) +- **TIM**: Authentication and authorization service +- **CronManager**: Scheduled task execution + +## Key Development Patterns + +### DSL-First API Development +APIs are defined declaratively in YAML files, not traditional controllers: +```yaml +# DSL/Ruuter.private/global-classifier/POST/inference/deploy.yml +declaration: + call: declare + method: post + accepts: json + allowlist: + body: + - field: modelId + type: string +``` + +### Database Operations via Resql +Database interactions use `.sql` files as endpoints, not ORM models: +```sql +-- DSL/Resql/global-classifier/POST/insert-data-models.sql +INSERT INTO public.data_models (model_name, deployment_env, base_models) +VALUES (:modelName, :deploymentEnv, :baseModels::jsonb) +``` + +### Service Configuration in constants.ini +All service URLs are centralized in `constants.ini` using `[#SERVICE_NAME]` placeholder syntax: +```ini +GLOBAL_CLASSIFIER_RUUTER_PRIVATE=http://ruuter-private:8088/global-classifier +GLOBAL_CLASSIFIER_RESQL=http://resql:8082/global-classifier +``` + +## Development Workflows + +### Environment Setup +```bash +# Use uv package manager (mandatory) +uv venv && uv sync +source .venv/bin/activate + +# Build required BYK stack images first +docker build -t ruuter . # in cloned Ruuter repo +docker build -t resql . # in cloned Resql repo +``` + +### Testing Requirements +- **Python**: Use `pytest` with 80% coverage minimum (`src/tests/`) +- **Frontend**: Playwright tests in `GUI/tests/` for critical user flows +- **Linting**: All code must pass `ruff check .` and `ruff format .` + +### Branch Strategy +1. **wip** → **testing** → **dev** (three-tier workflow) +2. All PRs target `wip` branch first +3. 
Automated validation in `testing` before promoting to `dev` + +## Component Structure + +### Python Services (`src/`) +- `training/`: ML model training scripts +- `inference/`: Model serving (prod/testing environments) +- `classifier-service/`: Node.js mock service for chat classification +- `dataset_file_handler/`: Data processing utilities + +### Frontend (`GUI/`) +- React + TypeScript with Vite +- Radix UI components and TanStack Query +- Multi-language support via `translations/` + +### Experiments (`experiments/`) +- `base_model_training/`: BERT/RoBERTa/XLM model experiments +- `ood_detection/`: Out-of-distribution detection research + +## Critical Integration Points + +### Authentication Flow +Routes use `.guard` files for auth checks. Private routes require cookie-based authentication validated through TIM service. + +### Model Deployment Pipeline +1. Create model metadata → Resql `insert-data-models.sql` +2. Update dataset connections → `update-datasets-connected-models.sql` +3. Initiate training → Call `/datamodels/train` endpoint +4. Environment progression: undeployed → testing → production + +### Configuration Management +- Docker services defined in multiple compose files (dev, inference-cpu, inference-gpu) +- Environment-specific configurations in `config.env` and `sidecar.env` +- Service discovery via DNS names in docker network `bykstack` + +## Common Anti-Patterns to Avoid + +- **Don't** create traditional REST controllers - use Ruuter YAML DSL +- **Don't** write raw SQL in application code - use Resql `.sql` files +- **Don't** hardcode service URLs - reference `constants.ini` placeholders +- **Don't** bypass authentication guards on private routes +- **Don't** use pip/conda - project requires `uv` package manager + +## Quick Reference + +### Adding New API Endpoint +1. Create YAML in `DSL/Ruuter.{public|private}/global-classifier/{METHOD}/` +2. Add SQL queries in `DSL/Resql/global-classifier/{METHOD}/` +3. Update service constants if calling external services +4. 
Add authentication guard if private endpoint + +### Running Services Locally +```bash +docker-compose up -d # Full stack +docker-compose -f docker-compose-dev.yml up # Development mode +``` + +### Model Training/Inference +- Training: Triggered via `/datamodels/train` POST endpoint +- Inference: Environment-specific deployments in `src/inference/{prod|testing}/` +- Models support: BERT, RoBERTa, XLM base architectures diff --git a/DSL/Ruuter.private/global-classifier/POST/inference/deploy.yml b/DSL/Ruuter.private/global-classifier/POST/inference/deploy.yml new file mode 100644 index 00000000..a3561647 --- /dev/null +++ b/DSL/Ruuter.private/global-classifier/POST/inference/deploy.yml @@ -0,0 +1,144 @@ +declaration: + call: declare + version: 0.1 + description: "Deploy a data model in a specified environment" + method: post + accepts: json + returns: json + namespace: global-classifier + # Input Validation Schema + allowlist: + body: + - field: modelId + type: number + description: "Model identifier (integer)" + - field: currentEnv + type: string + description: "Current environment: undeployed, testing, or production" + - field: targetEnv + type: string + description: "Target environment: undeployed, testing, or production" + next: extractRequestData + +# Data Extraction +extractRequestData: + assign: + modelId: ${incoming.body.modelId} + currentEnv: ${incoming.body.currentEnv} + targetEnv: ${incoming.body.targetEnv} + deploymentType: ${currentEnv + "_to_" + targetEnv} + next: validate_input + +# Required Field Validation +validate_input: + switch: + - condition: ${!modelId} + next: validation_error + - condition: ${!currentEnv || !['undeployed', 'testing', 'production'].includes(currentEnv)} + next: validation_error + - condition: ${!targetEnv || !['undeployed', 'testing', 'production'].includes(targetEnv)} + next: validation_error + - condition: ${currentEnv === targetEnv} + next: same_environment_error + next: selectDeploymentPath + +selectDeploymentPath: + switch: + + - condition: ${deploymentType === "undeployed_to_testing"} + next: pushUndeployedModelToTesting + + - condition: ${deploymentType === "testing_to_production"} + next: pushTestingModelToProduction + + - condition: ${deploymentType === "production_to_testing"} + next: pushProductionModelToTesting + + - condition: ${deploymentType === "testing_to_undeployed"} + next: pushTestingModelToUndeployed + + - condition: ${deploymentType === "production_to_undeployed"} + next: pushProductionModelToUndeployed + + - condition: ${deploymentType === "undeployed_to_production"} + next: pushUndeployedModelToProduction + + next: return_invalid_deployment_path_error + +pushUndeployedModelToTesting: + call: reflect.mock + args: + response: + success: true + message: "Model successfully pushed from undeployed to testing environment" + result: deployment_status + next: return_deployment_status + +pushTestingModelToProduction: + call: reflect.mock + args: + response: + success: true + message: "Model successfully pushed from testing to production environment" + result: deployment_status + next: return_deployment_status + +pushProductionModelToTesting: + call: reflect.mock + args: + response: + success: true + message: "Model successfully pushed from production to testing environment" + result: deployment_status + next: return_deployment_status + +pushTestingModelToUndeployed: + call: reflect.mock + args: + response: + success: true + message: "Model successfully pushed from testing to undeployed environment" + result: deployment_status + next: 
return_deployment_status + +pushProductionModelToUndeployed: + call: reflect.mock + args: + response: + success: true + message: "Model successfully pushed from production to undeployed environment" + result: deployment_status + next: return_deployment_status + +pushUndeployedModelToProduction: + call: reflect.mock + args: + response: + success: true + message: "Model successfully pushed from undeployed to production environment" + result: deployment_status + next: return_deployment_status + + +return_invalid_deployment_path_error: + return: "Invalid deployment path. Supported transitions - undeployed to testing, testing to production, production to testing, testing to undeployed, production to undeployed, undeployed to production" + status: 400 + next: end + + +validation_error: + return: "Invalid input. Required - model_id (number), current_env or target_env (undeployed|testing|production)" + status: 400 + next: end + +same_environment_error: + return: "Current environment and target environment cannot be the same" + status: 400 + next: end + +return_deployment_status: + return: ${deployment_status.response} + status: 200 + next: end + + diff --git a/constants.ini b/constants.ini index 2302c261..cee82f80 100644 --- a/constants.ini +++ b/constants.ini @@ -9,7 +9,10 @@ GLOBAL_CLASSIFIER_CRON_MANAGER=http://cron-manager:9010 GLOBAL_CLASSIFIER_FILE_HANDLER=http://file-handler:8000 GLOBAL_CLASSIFIER_NOTIFICATIONS=http://notifications-node:4040 GLOBAL_CLASSIFIER_ANONYMIZER=http://anonymizer:8010 -GLOBAL_CLASSIFIER_MODEL_INFERENCE=http://model-inference:8003 +GLOBAL_CLASSIFIER_TESTING_ENV_MODEL_SERVER=http://triton-test-server:8000 +GLOBAL_CLASSIFIER_TESTING_ENV_MODEL_METRICS=http://triton-test-server:8001 +GLOBAL_CLASSIFIER_PRODUCTION_ENV_MODEL_SERVER=http://triton-production-server:8000 +GLOBAL_CLASSIFIER_PRODUCTION_ENV_MODEL_METRICS=http://triton-production-server:8001 DOMAIN=localhost DB_PASSWORD=dbadmin CHATBOT_CLASSIFIER_SERVICE=http://classifier-service:8090 From bd3b3c33cfdcad2a5688cf6468dd515eb648c712 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Fri, 18 Jul 2025 12:09:05 +0530 Subject: [PATCH 103/195] update styles --- .../TrainingResults/TrainingResults.scss | 21 ++++++++++--------- .../molecules/TrainingResults/index.tsx | 2 +- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/GUI/src/components/molecules/TrainingResults/TrainingResults.scss b/GUI/src/components/molecules/TrainingResults/TrainingResults.scss index 753e6313..433df2ee 100644 --- a/GUI/src/components/molecules/TrainingResults/TrainingResults.scss +++ b/GUI/src/components/molecules/TrainingResults/TrainingResults.scss @@ -1,5 +1,5 @@ .results-wrapper { - padding: 20px; + padding: 1rem; background-color: #fff; } @@ -17,7 +17,7 @@ } .model-section { - margin-bottom: 2rem; + margin-bottom: 1.5rem; } .model-name { @@ -45,8 +45,10 @@ .metric-row { display: flex; justify-content: space-between; - margin-bottom: 6px; + margin-bottom: 0; font-size: 14px; + width: auto; + padding: .5rem 0rem; } .metric-label { @@ -81,13 +83,6 @@ margin: 0; } -.metric-row { - display: flex; - justify-content: space-between; - width: auto; - padding: .5rem 0rem; -} - .metric-class { width: 30%; } @@ -97,4 +92,10 @@ grid-template-columns: repeat(4, 7rem); gap: 1rem; width: 70%; +} + +.model-metrics-card { + border: 1px solid #E6E6E6; + padding: .5rem; + border-radius: 5px; } \ No newline at end of file diff --git a/GUI/src/components/molecules/TrainingResults/index.tsx b/GUI/src/components/molecules/TrainingResults/index.tsx index 
ceac09b0..1b63e327 100644 --- a/GUI/src/components/molecules/TrainingResults/index.tsx +++ b/GUI/src/components/molecules/TrainingResults/index.tsx @@ -30,7 +30,7 @@ const ModelResults: React.FC<ModelResultsProps> = ({ models }) => {
    {model.model_type}
    -
    +
    Classes
    From c13304ceada5288d4cdcce8596da4b4528dfa9a2 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Fri, 18 Jul 2025 12:31:34 +0530 Subject: [PATCH 104/195] feat: Update DataModelCard and DataModelForm --- .../molecules/DataModelCard/index.tsx | 43 ++++++---- .../molecules/DataModelForm/index.tsx | 12 ++- .../TrainingResults/TrainingResults.scss | 6 +- .../molecules/TrainingResults/index.tsx | 19 +---- GUI/src/types/dataModels.ts | 84 +++++++++++++++++-- 5 files changed, 116 insertions(+), 48 deletions(-) diff --git a/GUI/src/components/molecules/DataModelCard/index.tsx b/GUI/src/components/molecules/DataModelCard/index.tsx index c0c54cd9..00e97fcc 100644 --- a/GUI/src/components/molecules/DataModelCard/index.tsx +++ b/GUI/src/components/molecules/DataModelCard/index.tsx @@ -4,9 +4,8 @@ import Label from 'components/Label'; import { useDialog } from 'hooks/useDialog'; import './DataModel.scss'; import { Maturity, TrainingStatus } from 'enums/dataModelsEnums'; -import Card from 'components/Card'; import { useTranslation } from 'react-i18next'; -import { TrainingResults } from 'types/dataModels'; +import { TrainingResultsResponse } from 'types/dataModels'; import { formatDate } from 'utils/commonUtilts'; import { useNavigate } from 'react-router-dom'; import ModelResults from '../TrainingResults'; @@ -21,7 +20,7 @@ type DataModelCardProps = { trainingStatus?: string; modelStatus?: string; deploymentEnv?: string; - results?: any | null; + results?: TrainingResultsResponse | null; }; const DataModelCard: FC> = ({ @@ -39,12 +38,20 @@ const DataModelCard: FC> = ({ }) => { const { open, close } = useDialog(); const { t } = useTranslation(); - const trainingResults = results?.value && JSON.parse(results?.value) || null; -const navigate = useNavigate(); + const navigate = useNavigate(); -const configureDataModel = () => { - navigate(`/configure-datamodel?datamodelId=${modelId}`); -} + let trainingResults = null; + if (results?.value) { + try { + trainingResults = JSON.parse(results.value); + } catch (error) { + console.error("Failed to parse training results:", error); + } + } + + const configureDataModel = () => { + navigate(`/configure-datamodel?datamodelId=${modelId}`); + } const renderTrainingStatus = (status: string | undefined) => { if (status === TrainingStatus.RETRAINING_NEEDED) { @@ -116,7 +123,7 @@ const configureDataModel = () => { {lastTrained && formatDate(new Date(lastTrained), 'D.M.yy-H:m')}

    -
    +
    {renderTrainingStatus(trainingStatus)} {isLatest &&
    )} + {isProgressModalOpen && ( + setIsUpdateModalOpen(false)} + isOpen={ + isProgressModalOpen + } + footer={
    + + +
    } + > + {isUpdating ?
    +

    {t('datasets.detailedView.dataBeingUpdated')}

    +
    +
    :

    {t('datasets.detailedView.editDataRowDesc')}

    } + +
    + )} ); }; diff --git a/GUI/src/services/datamodels.ts b/GUI/src/services/datamodels.ts index 85faa65e..c6a8843d 100644 --- a/GUI/src/services/datamodels.ts +++ b/GUI/src/services/datamodels.ts @@ -101,7 +101,7 @@ export async function getAllModelVersions() { } export async function loadModel(modelId: number | string | null) { - const { data } = await apiPublic.post(dataModelsEndpoints.LOAD_MODEL(), { + const { data } = await apiDev.post(dataModelsEndpoints.LOAD_MODEL(), { modelId, }); return data?.response ?? []; @@ -112,7 +112,7 @@ export async function classify(data: ClassifyTestModalPayloadType) { testModelsEndpoints.CLASSIFY_TEST_MODELS(), { modelId: data.modelId, text: data.text }, ); - return response?.data?.response?.data as ClassifyTestModalResponseType ?? []; + return response?.data?.response as ClassifyTestModalResponseType ?? []; } export async function getDataModelsProgress() { const { data } = await apiDev.get(dataModelsEndpoints.GET_DATA_MODEL_PROGRESS()); diff --git a/GUI/src/services/datasets.ts b/GUI/src/services/datasets.ts index 8e362999..c2ddf01e 100644 --- a/GUI/src/services/datasets.ts +++ b/GUI/src/services/datasets.ts @@ -10,7 +10,7 @@ export async function getDatasetsOverview( params: { page: pageNum, generationStatus: "all", - sortBy:sort?.split(" ")?.[0], + sortBy: sort?.split(" ")?.[0], sortType: sort?.split(" ")?.[1], pageSize: OVERVIEW_PAGE_SIZE, }, @@ -19,7 +19,7 @@ export async function getDatasetsOverview( } export async function getDatasetMetadata( - datasetId: number |string) { + datasetId: number | string) { const { data } = await apiDev.get(datasetsEndpoints.GET_METADATA(), { params: { datasetId @@ -29,14 +29,17 @@ export async function getDatasetMetadata( } export async function getDatasetData( - datasetVersionId: number |string, + datasetVersionId: number | string, pageNum?: number, + clientId?: string + ) { const { data } = await apiDev.get(datasetsEndpoints.GET_DATASETS_DATA(), { params: { datasetVersionId, - pageNum : pageNum ?? 1, - pageSize:DATASET_PAGE_SIZE + pageNum: pageNum ?? 1, + pageSize: DATASET_PAGE_SIZE, + clientId: clientId ?? "all", }, }); return data?.response ?? []; @@ -50,4 +53,19 @@ export async function getAllDatasetVersions() { export async function getDataGenerationProgress() { const { data } = await apiDev.get(datasetsEndpoints.GET_DATA_GENERATION_PROGRESS()); return data?.response?.data; +} + +export async function updateDataset(payload: { + updatedDataItems: any[]; + deletedRows: (string | number)[]; + updatedRowsLength: number; + deletedRowsLength: number; +}) { + const { data } = await apiDev.post(datasetsEndpoints.UPDATE_DATASET(), payload); + return data?.response ?? {}; +} + +export async function deleteDataset(datasetVersionId: number | string) { + const { data } = await apiDev.post(datasetsEndpoints.DELETE_DATASET(), { datasetVersionId }); + return data?.response ?? 
{}; } \ No newline at end of file diff --git a/GUI/src/utils/endpoints.ts b/GUI/src/utils/endpoints.ts index 94013575..21bd4d9a 100644 --- a/GUI/src/utils/endpoints.ts +++ b/GUI/src/utils/endpoints.ts @@ -20,6 +20,9 @@ export const datasetsEndpoints = { GET_DATASETS_DATA: (): string => '/global-classifier/datasets/data', GET_ALL_DATASET_VERSIONS: (): string => '/global-classifier/datasets/versions', GET_DATA_GENERATION_PROGRESS: (): string => `/global-classifier/datasets/progress`, + UPDATE_DATASET: (): string => `/global-classifier/datasets/update`, + DELETE_DATASET: (): string => `/global-classifier/datasets/delete`, + GET_DATASET_FILTERS: (): string => @@ -58,7 +61,7 @@ export const dataModelsEndpoints = { CREATE_MINOR_VERSION: (): string => '/global-classifier/datamodels/minor', DELETE_MODEL: (): string => '/global-classifier/datamodels/delete', GET_ALL_DATAMODELS_VERSIONS: (): string => '/global-classifier/datamodels/versions', - LOAD_MODEL: (): string => '/global-classifier/datamodels/inference/load-model', + LOAD_MODEL: (): string => '/global-classifier/testmodel/load-model', GET_DATA_MODEL_PROGRESS: (): string => `global-classifier/datamodels/progress`, DEPLOY_MODEL: (): string => '/global-classifier/inference/deploy', @@ -75,6 +78,6 @@ export const dataModelsEndpoints = { export const testModelsEndpoints = { GET_MODELS: (): string => `/global-classifier/testmodel/models`, - CLASSIFY_TEST_MODELS: (): string => `/global-classifier/testmodel/test-data`, + CLASSIFY_TEST_MODELS: (): string => `/global-classifier/testmodel/classify`, }; diff --git a/GUI/translations/en/common.json b/GUI/translations/en/common.json index 5d4a866d..a23e1b21 100644 --- a/GUI/translations/en/common.json +++ b/GUI/translations/en/common.json @@ -53,7 +53,8 @@ "proceed":"Proceed", "maxFileSize":"File size should not exceed 20 MB.", "select": "-Select-", - "replace": "Replace" + "replace": "Replace", + "clearFilters": "Clear Filters" }, "menu": { "userManagement": "User Management", @@ -116,12 +117,12 @@ "title": "Integrated Clients", "search": "Search client", "sortOptions": { - "agencyAsc": "Agency Name A-Z", - "agencyDesc": "Agency Name Z-A", - "createdDateAsc": "Client Created Date Oldest First", - "createdDateDesc": "Client Created Date Latest First", - "lastUpdatedDateAsc": "Last Updated Date Oldest First", - "lastUpdatedDateDesc": "Last Updated Date Latest First" + "agencyAsc": "Client Name: A-Z", + "agencyDesc": "Client Name: Z-A", + "createdDateAsc": "Created: Oldest First", + "createdDateDesc": "Created: Newest First", + "lastUpdatedDateAsc": "Updated: Oldest First", + "lastUpdatedDateDesc": "Updated: Newest First" }, "agencyCard": { "lastModelTrained": "Last Model Trained", @@ -187,8 +188,8 @@ "title": "Datasets", "noDatasets": "No data sets available", "sortOptions": { - "createdDateAsc": "Created Date Oldest First", - "createdDateDesc": "Created Date Latest First" + "createdDateAsc": "Created: Oldest First", + "createdDateDesc": "Created: Newest First" }, "datasetCard": { "inProgress": "Data Generation in Progress", @@ -215,14 +216,25 @@ "data": "Data", "clientName": "Client Name", "patchUpdateBanner": "You have edited individual items in the dataset which are not saved. Please save the changes to apply", - "confirmPatchUpdatesTitle": "Confirm patch update", - "confirmPatchUpdatesDesc": "Changed data rows will be updated in the dataset", - "patchDataUnsuccessfulTitle": "Patch data update unsuccessful", - "patchDataUnsuccessfulDesc": "Something went wrong. 
Please try again.", + "confirmUpdateDatasetTitle": "Confirm dataset update", + "confirmUpdateDatasetDesc": "Changed data rows will be updated in the dataset", + "confirmDeleteDatasetTitle": "Confirm dataset deletion", + "confirmDeleteDatasetDesc": "Deleted data rows will be removed from the dataset", + "datasetUpdateUnsuccessfulTitle": "Dataset update unsuccessful", + "datasetUpdateUnsuccessfulDesc": "Something went wrong while updating the dataset. Please try again.", + "datasetUpdateSuccessfulTitle": "Dataset update successful", + "datasetUpdateSuccessfulDesc": "The dataset has been successfully updated.", + "datasetDeleteUnsuccessfulTitle": "Dataset delete unsuccessful", + "datasetDeleteUnsuccessfulDesc": "Something went wrong while deleting the dataset. Please try again.", + "datasetDeleteSuccessfulTitle": "Dataset delete successful", + "datasetDeleteSuccessfulDesc": "The dataset has been successfully deleted.", "exportDataSuccessTitle": "Data export was successful", "exportDataSuccessDesc": "Your data has been successfully exported.", "exportDataUnsucessTitle": "Dataset export unsuccessful", "exportDataUnsucessDesc": "Something went wrong. Please try again.", + "itemsUpdated": "items updated", + "itemsDeleted": "items deleted", + "dataBeingUpdated": "Data is being updated...", "table": { "id": "Item ID", "data": "Data", @@ -303,10 +315,10 @@ } }, "validationSessions": { - "title": "Validation Sessions", - "inprogress": "Validation in-Progress", - "fail": "Validation failed because {{class}} class found in the {{column}} column does not exist in hierarchy", - "noSessions": "No ongoing validation sessions available" + "title": "Data Generation Sessions", + "inprogress": "Data Generation in-Progress", + "fail": "Data Generation failed because {{class}} class found in the {{column}} column does not exist in hierarchy", + "noSessions": "No ongoing Data Generation sessions available" }, "correctedTexts": { "title": "Corrected Texts", @@ -335,10 +347,10 @@ "noProdModels": "No production models available", "noModels": "No models available", "sortOptions": { - "dataModelAsc": "Data Model Name A-Z", - "dataModelDesc": "Data Model Name Z-A", - "createdDateAsc": "Created Date Oldest First", - "createdDateDesc": "Created Date Latest First" + "dataModelAsc": "Data Model Name: A-Z", + "dataModelDesc": "Data Model Name: Z-A", + "createdDateAsc": "Created: Oldest First", + "createdDateDesc": "Created: Newest First" }, "filters": { "modelName": "Model Name", @@ -445,7 +457,14 @@ "classify": "Classify", "predictedHierarchy": "Predicted Class Hierarchy : ", "averageConfidence": "Average Confidence : ", - "classProbabilities": "Class Probabilities : " + "classProbabilities": "Class Probabilities : ", + "error": "Classification Error", + "errorDesc": "There was an issue classifying the text. Please try again. If the problem persists, contact support for assistance.", + "results": "Classification Results", + "topPrediction": "Top Prediction", + "allPredictions": "All Predictions", + "classificationFailed": "Classification failed. Please try again." 
+ }, "optionLists": { "text": "Text", From 48720a2500a79f1cb7ece38ea2561f05daa0d756 Mon Sep 17 00:00:00 2001 From: Thiru Dinesh Date: Thu, 31 Jul 2025 02:58:39 +0530 Subject: [PATCH 183/195] updated endpoints.ts --- GUI/src/utils/endpoints.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GUI/src/utils/endpoints.ts b/GUI/src/utils/endpoints.ts index b3493a2e..78555081 100644 --- a/GUI/src/utils/endpoints.ts +++ b/GUI/src/utils/endpoints.ts @@ -58,7 +58,7 @@ export const dataModelsEndpoints = { CREATE_MINOR_VERSION: (): string => '/global-classifier/datamodels/minor', DELETE_MODEL: (): string => '/global-classifier/datamodels/delete', GET_ALL_DATAMODELS_VERSIONS: (): string => '/global-classifier/datamodels/versions', - LOAD_MODEL: (): string => '/global-classifier/datamodels/load', + LOAD_MODEL: (): string => '/global-classifier/testmodel/load', GET_DATA_MODEL_PROGRESS: (): string => `global-classifier/datamodels/progress`, DEPLOY_MODEL: (): string => '/global-classifier/datamodels/deploy', @@ -75,6 +75,6 @@ export const dataModelsEndpoints = { export const testModelsEndpoints = { GET_MODELS: (): string => `/global-classifier/testmodel/models`, - CLASSIFY_TEST_MODELS: (): string => `/global-classifier/testmodel/test-data`, + CLASSIFY_TEST_MODELS: (): string => `/global-classifier/testmodel/classify`, }; From 25074425c7d4c7b5abc434b9a5b06b5fe07f6c49 Mon Sep 17 00:00:00 2001 From: Thiru Dinesh Date: Thu, 31 Jul 2025 03:10:58 +0530 Subject: [PATCH 184/195] updated test model classify.yml template --- .../global-classifier/POST/testmodel/classify.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/DSL/Ruuter.private/global-classifier/POST/testmodel/classify.yml b/DSL/Ruuter.private/global-classifier/POST/testmodel/classify.yml index a64f50a3..5dfb7001 100644 --- a/DSL/Ruuter.private/global-classifier/POST/testmodel/classify.yml +++ b/DSL/Ruuter.private/global-classifier/POST/testmodel/classify.yml @@ -11,14 +11,14 @@ declaration: - field: modelId type: string description: "Identifier of the model to use for classification" - - field: message + - field: text type: string - description: "Text message to be classified" + description: "Text text to be classified" extractClassificationData: assign: modelId: ${incoming.body.modelId} - message: ${incoming.body.message} + text: ${incoming.body.text} startTime: ${Date.now()} next: validate_input @@ -26,7 +26,7 @@ validate_input: switch: - condition: ${!modelId} next: return_validation_error - - condition: ${!message} + - condition: ${!text} next: return_validation_error next: prepare_inference_payload @@ -45,7 +45,7 @@ call_triton_inference: datatype: "BYTES" shape: [1, 1] data: - - ${message} + - ${text} headers: Content-Type: "application/json" result: inference_result @@ -65,7 +65,7 @@ log_testing_inference_result: url: "[#GLOBAL_CLASSIFIER_RESQL]/insert-testing-inference-log" body: modelId: ${modelId} - message: ${message} + text: ${text} inferenceTimeMs: ${Date.now() - startTime} parsedOutput: ${JSON.stringify(parsedOutput)} result: log_result @@ -84,7 +84,7 @@ return_log_inference_error: return_validation_error: status: 400 - return: "Invalid payload. modelId and message are required." + return: "Invalid payload. modelId and text are required." 
next: end return_inference_error: From 7c665b8789fa6d80cb792ffb39a88620a4efef80 Mon Sep 17 00:00:00 2001 From: Thiru Dinesh Date: Thu, 31 Jul 2025 04:43:32 +0530 Subject: [PATCH 185/195] updated model trainer with training creation --- src/model-training/constants.py | 31 ++-- src/model-training/model_trainer.py | 226 +++++++++++++++++++++++++++- 2 files changed, 232 insertions(+), 25 deletions(-) diff --git a/src/model-training/constants.py b/src/model-training/constants.py index b39953cd..c0ce3a16 100644 --- a/src/model-training/constants.py +++ b/src/model-training/constants.py @@ -1,8 +1,8 @@ UPDATE_MODEL_TRAINING_STATUS_ENDPOINT = "http://ruuter-public:8086/global-classifier/datamodels/training/status/update" -CREATE_TRAINING_PROGRESS_SESSION_ENDPOINT = "http://ruuter-private:8088/global-classifier/datamodels/progress/create" +CREATE_TRAINING_PROGRESS_SESSION_ENDPOINT = "http://ruuter-public:8086/global-classifier/datamodels/progress/create" -UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT = "http://ruuter-private:8088/global-classifier/datamodels/progress/update" +UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT = "http://ruuter-public:8086/global-classifier/datamodels/progress/update" DEPLOYMENT_ENDPOINT = "http://ruuter-public:8086/global-classifier/inference/deploy" @@ -26,11 +26,6 @@ CLASSIFIER_MODEL_FILENAME = "classifier_{model_id}.pth" -MODEL_TRAINING_IN_PROGRESS = "training in-progress" - -MODEL_TRAINING_SUCCESSFUL = "trained" - -MODEL_TRAINING_FAILED = "not trained" # MODEL TRAINING PROGRESS SESSION CONSTANTS @@ -42,22 +37,22 @@ MODEL_TRAINED_AND_DEPLOYED_PROGRESS_STATUS = "Model Trained And Deployed" +TRAINING_FAILED_STATUS= "Training Failed" + +DEPLOYMENT_FAILED_STATUS = "Deployment Failed" + INITIATING_TRAINING_PROGRESS_MESSAGE = "Download and preparing dataset" -TRAINING_IN_PROGRESS_PROGRESS_MESSAGE = ( - "The dataset is being trained on all selected models" -) +TRAINING_IN_PROGRESS_PROGRESS_MESSAGE = "The dataset is being trained on all selected models" -DEPLOYING_MODEL_PROGRESS_MESSAGE = ( - "Model training complete. The trained model is now being deployed" -) -MODEL_TRAINED_AND_DEPLOYED_PROGRESS_MESSAGE = ( - "The model was trained and deployed successfully to the environment" -) +DEPLOYING_MODEL_PROGRESS_MESSAGE = "Model training complete. 
The trained model is now being deployed" -MODEL_TRAINING_FAILED_ERROR = "Training Failed" +MODEL_TRAINED_AND_DEPLOYED_PROGRESS_MESSAGE = "The model was trained and deployed successfully to the environment" + + +TRAINING_FAILED_STATUS_MESSAGE = "Model training has failed" INITIATING_TRAINING_PROGRESS_PERCENTAGE = 30 @@ -68,6 +63,8 @@ MODEL_TRAINED_AND_DEPLOYED_PROGRESS_PERCENTAGE = 100 +TRAINING_FAILED_PROGRESS_PERCENTAGE = 100 + # Supported Models for Testing SUPPORTED_BASE_MODELS = ["estbert", "xlm-roberta", "multilingual-distilbert"] diff --git a/src/model-training/model_trainer.py b/src/model-training/model_trainer.py index d3464f6f..59ccd83f 100644 --- a/src/model-training/model_trainer.py +++ b/src/model-training/model_trainer.py @@ -17,7 +17,25 @@ MODEL_TRAINING_SOURCE_PATH, DEPLOYMENT_ENDPOINT, CREATE_TRAINING_PROGRESS_SESSION_ENDPOINT, - UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT + UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT, + UPDATE_MODEL_TRAINING_STATUS_ENDPOINT, + INITIATING_TRAINING_PROGRESS_STATUS, + TRAINING_IN_PROGRESS_PROGRESS_STATUS, + DEPLOYING_MODEL_PROGRESS_STATUS, + MODEL_TRAINED_AND_DEPLOYED_PROGRESS_STATUS, + TRAINING_FAILED_STATUS, + DEPLOYMENT_FAILED_STATUS, + INITIATING_TRAINING_PROGRESS_PERCENTAGE, + TRAINING_IN_PROGRESS_PROGRESS_PERCENTAGE, + DEPLOYING_MODEL_PROGRESS_PERCENTAGE, + MODEL_TRAINED_AND_DEPLOYED_PROGRESS_PERCENTAGE, + INITIATING_TRAINING_PROGRESS_MESSAGE, + TRAINING_IN_PROGRESS_PROGRESS_MESSAGE, + DEPLOYING_MODEL_PROGRESS_MESSAGE, + MODEL_TRAINED_AND_DEPLOYED_PROGRESS_MESSAGE, + TRAINING_FAILED_STATUS_MESSAGE, + TRAINING_FAILED_PROGRESS_PERCENTAGE + ) from loguru import logger @@ -55,7 +73,7 @@ def __init__( self.current_deployment_platform = current_deployment_env self.target_deployment_platform = target_deployment_platform - self.progress_session_id = int(progress_session_id) + self.progress_session_id = "" except Exception as e: logger.error(f"EXCEPTION IN MODEL_TRAINER INIT : {e}") @@ -76,6 +94,168 @@ def get_current_timestamp(self): current_timestamp = int(datetime.now(timezone.utc).timestamp()) return current_timestamp + def create_training_progress_session(self): + """ + Create a training progress session in the database. + This function should be implemented to create a training progress session in the database. 
+ """ + logger.info("Creating training progress session") + + payload = { + "modelId": int(self.model_id), + "modelName": self.model_name, + "majorVersion": self.major_version, + "minorVersion": self.minor_version, + "latest": self.latest(), + } + + logger.info(f"Prepared training progress session payload {payload}") + + try: + # Make request to create training progress session endpoint + response = requests.post( + url=CREATE_TRAINING_PROGRESS_SESSION_ENDPOINT, + json=payload, + headers={"Content-Type": "application/json"}, + timeout=300 # 5 minute timeout for creating progress session + ) + + logger.info(f"Create training progress session response - {response.status_code} - {response.text}") + + # Check if request was successful + response.raise_for_status() + + logger.info("Training progress session created successfully") + + session_data = response.json() + session_id = session_data["response"]["sessionId"] + + self.progress_session_id = session_id + + return response.json() + + except requests.HTTPError as e: + error_msg = f"HTTP error during creating training progress session: {e.response.status_code} - {e.response.text}" + logger.error(error_msg, model_id=self.model_id, status_code=e.response.status_code) + raise + + except requests.RequestException as e: + error_msg = f"Network error during creating training progress session: {str(e)}" + logger.error(error_msg, model_id=self.model_id) + raise + + except Exception as e: + error_msg = f"Unexpected error during creating training progress session: {str(e)}" + logger.error(error_msg, model_id=self.model_id) + raise + + def update_training_progression_session(self,training_status:str, training_message:str, progress_percentage:int, process_complete:bool): + """ + Update the training progress session in the database. + This function should be implemented to update the training progress session in the database. + """ + logger.info("Updating training progress session") + + if not self.progress_session_id: + logger.error("Progress session ID is not set. 
Cannot update training progress session.") + raise ValueError("Progress session ID is required to update the training progress session.") + + else: + + payload = { + "sessionId": self.progress_session_id, + "trainingStatus": training_status, + "trainingMessage": training_message, + "progressPercentage": progress_percentage, + "processComplete": process_complete + } + + logger.info(f"Prepared training progress session update payload {payload}") + + try: + # Make request to update training progress session endpoint + response = requests.post( + url=UPDATE_TRAINING_PROGRESS_SESSION_ENDPOINT, + json=payload, + headers={"Content-Type": "application/json"}, + timeout=300 # 5 minute timeout for updating progress session + ) + + logger.info(f"Update training progress session response - {response.status_code} - {response.text}") + + # Check if request was successful + response.raise_for_status() + + logger.info("Training progress session updated successfully") + + return response.json() + + except requests.HTTPError as e: + error_msg = f"HTTP error during updating training progress session: {e.response.status_code} - {e.response.text}" + logger.error(error_msg, model_id=self.model_id, status_code=e.response.status_code) + raise + + except requests.RequestException as e: + error_msg = f"Network error during updating training progress session: {str(e)}" + logger.error(error_msg, model_id=self.model_id) + raise + + except Exception as e: + error_msg = f"Unexpected error during updating training progress session: {str(e)}" + logger.error(error_msg, model_id=self.model_id) + raise + + def update_training_results(self, training_results, model_s3_location): + """ + Update training results in the database. + This function should be implemented to update the training results in the database. 
+ """ + logger.info("Updating training results in the database") + + payload = { + "modelId": self.model_id, + "trainingResults": training_results, + "modelS3Location": model_s3_location + } + + logger.info(f"Prepared deployment payload {payload}") + + try: + # Make request to deployment endpoint + response = requests.post( + url=UPDATE_MODEL_TRAINING_STATUS_ENDPOINT, + json=payload, + headers={"Content-Type": "application/json"}) + + logger.info(f"Update model endpoint response - {response.status_code} - {response.text}") + + # Check if request was successful + response.raise_for_status() + + logger.info("Model training data pushed to database successfully") + + return response.json() + + except requests.HTTPError as e: + error_msg = f"HTTP error during model deployment: {e.response.status_code} - {e.response.text}" + logger.error(error_msg, model_id=self.model_id, + current_env=self.current_deployment_platform, target_env=self.target_deployment_platform, + status_code=e.response.status_code) + raise + + except requests.RequestException as e: + error_msg = f"Network error during model deployment: {str(e)}" + logger.error(error_msg, model_id=self.model_id, + current_env=self.current_deployment_platform, target_env=self.target_deployment_platform) + raise + + except Exception as e: + error_msg = f"Unexpected error during model deployment: {str(e)}" + logger.error(error_msg, model_id=self.model_id, + current_env=self.current_deployment_platform, target_env=self.target_deployment_platform) + raise + + def calculate_combined_score(self, accuracies, f1_scores): """Calculate combined score using weighted average""" if not accuracies or not f1_scores: @@ -102,6 +282,13 @@ def train(self): logger.info("ENTERING UNIFIED TRAINING FUNCTION") logger.info(f"DEPLOYMENT PLATFORM - {self.current_deployment_platform}") + trainer.update_training_progression_session( + training_status=INITIATING_TRAINING_PROGRESS_STATUS, + training_message=INITIATING_TRAINING_PROGRESS_MESSAGE, + progress_percentage=INITIATING_TRAINING_PROGRESS_PERCENTAGE, + process_complete=False) + + # Initialize services s3_ferry = S3Ferry() @@ -117,6 +304,12 @@ def train(self): # Generate all model variants to train model_variants = [] + trainer.update_training_progression_session( + training_status=TRAINING_IN_PROGRESS_PROGRESS_STATUS, + training_message=TRAINING_IN_PROGRESS_PROGRESS_MESSAGE, + progress_percentage=TRAINING_IN_PROGRESS_PROGRESS_PERCENTAGE, + process_complete=False) + # Add standard models for base_model in self.model_types: @@ -353,11 +546,7 @@ def train(self): logger.info( f"INITIATING DEPLOYMENT OF {best_variant['name']} TO {self.current_deployment_platform}" ) - # self.deploy_model( - # best_model_info=best_variant, - # progress_session_id=session_id, - # dg_id=dg_id, - # ) + logger.info("=" * 60) logger.info("UNIFIED TRAINING COMPLETED SUCCESSFULLY") @@ -366,11 +555,30 @@ def train(self): logger.info(f"VARIANTS TRAINED: {len(all_results)}") logger.info("=" * 60) + logger.info("Updating training results to database") + self.update_training_results( + training_results=all_results, + model_s3_location=s3_save_location) + + trainer.update_training_progression_session( + training_status=DEPLOYING_MODEL_PROGRESS_STATUS, + training_message=DEPLOYING_MODEL_PROGRESS_MESSAGE, + progress_percentage=DEPLOYING_MODEL_PROGRESS_PERCENTAGE, + process_complete=False) + + except Exception as e: import traceback logger.error(f"EXCEPTION IN UNIFIED MODEL TRAINER: {e}") logger.error(traceback.format_exc()) + + 
trainer.update_training_progression_session( + training_status=TRAINING_FAILED_STATUS, + training_message=TRAINING_FAILED_STATUS_MESSAGE, + progress_percentage=TRAINING_FAILED_PROGRESS_PERCENTAGE, + process_complete=False) + raise def deploy(self): @@ -387,7 +595,7 @@ def deploy(self): """ - + #TODO - Add sessionId here to pass session ID to the deployment endpoint logger.info("Starting model deployment") # Prepare request payload @@ -500,5 +708,7 @@ def parse_args(): progress_session_id=progress_session_id, target_deployment_platform=target_deployment_platform, ) + + trainer.create_training_progress_session() trainer.train() trainer.deploy() \ No newline at end of file From fb53e71a9a3a554e1be0c82ad2073ee93ef56fb1 Mon Sep 17 00:00:00 2001 From: Thiru Dinesh Date: Thu, 31 Jul 2025 04:56:14 +0530 Subject: [PATCH 186/195] fixed minor issue --- src/model-training/model_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/model-training/model_trainer.py b/src/model-training/model_trainer.py index 59ccd83f..8ed8b61e 100644 --- a/src/model-training/model_trainer.py +++ b/src/model-training/model_trainer.py @@ -106,7 +106,7 @@ def create_training_progress_session(self): "modelName": self.model_name, "majorVersion": self.major_version, "minorVersion": self.minor_version, - "latest": self.latest(), + "latest": self.latest, } logger.info(f"Prepared training progress session payload {payload}") From 5358e9c8359f5ecdae06966c3e88e95ca9cb3e91 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 31 Jul 2025 09:53:53 +0530 Subject: [PATCH 187/195] refactor: Remove commented-out formatDataUrlsResponse and enhance initiateDataGeneration with versioning details --- .../POST/agencies/data/resync.yml | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/DSL/Ruuter.private/global-classifier/POST/agencies/data/resync.yml b/DSL/Ruuter.private/global-classifier/POST/agencies/data/resync.yml index 5a3dd3cc..dfa286e7 100644 --- a/DSL/Ruuter.private/global-classifier/POST/agencies/data/resync.yml +++ b/DSL/Ruuter.private/global-classifier/POST/agencies/data/resync.yml @@ -51,25 +51,15 @@ forwardToFormatting: next: end next: initiateDataGeneration -# formatDataUrlsResponse: -# call: http.post -# args: -# url: "[#GLOBAL_CLASSIFIER_DMAPPER]/hbs/global-classifier/format_data_urls" -# headers: -# type: json -# body: -# agencies: ${importResult.response.body.response} -# datasetId: ${datasetResponse.response.body.response[0].id} -# result: dataUrlsResponse -# next: returnImportResult - initiateDataGeneration: call: http.post args: url: "[#GLOBAL_CLASSIFIER_RUUTER_PUBLIC]/data/generate" body: - datasetId: ${datasetResponse.response.body.response[0].id} presignedUrls: ${importResult.response.body.response} + datasetId: ${datasetResponse.response.body.response[0].id} + majorVersion: ${datasetResponse.response.body.response[0].major} + minorVersion: ${datasetResponse.response.body.response[0].minor} result: dataGenerationResponse next: returnImportResult From 8c3b1b1a2c54ede838f48be54f87050b1eef7903 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 31 Jul 2025 10:39:58 +0530 Subject: [PATCH 188/195] fixes --- .../POST/get-latest-dataset.sql | 2 +- GUI/src/pages/TestModel/index.tsx | 34 ++++++++++--------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/DSL/Resql/global-classifier/POST/get-latest-dataset.sql b/DSL/Resql/global-classifier/POST/get-latest-dataset.sql index 968fe814..6bf4a871 100644 --- 
a/DSL/Resql/global-classifier/POST/get-latest-dataset.sql +++ b/DSL/Resql/global-classifier/POST/get-latest-dataset.sql @@ -1,4 +1,4 @@ -SELECT id, major +SELECT id, major, minor FROM public.dataset_versions ORDER BY created_at DESC LIMIT 1; \ No newline at end of file diff --git a/GUI/src/pages/TestModel/index.tsx b/GUI/src/pages/TestModel/index.tsx index 7b6d1ffb..5d7e259a 100644 --- a/GUI/src/pages/TestModel/index.tsx +++ b/GUI/src/pages/TestModel/index.tsx @@ -70,22 +70,24 @@ const TestModel: FC = () => { }, }); - const processClassificationResult = (result: any) => { - if (!result || !Array.isArray(result) || result.length === 0) return []; - - const resultData = result[0]; // Get the first (and likely only) object - - return Object.entries(resultData).map(([key, value]: [string, any]) => { - const agencyName = Object.keys(value)[0]; - const confidence = Object.values(value)[0] as number; - - return { - rank: parseInt(key), - agencyName: agencyName.replace(/_/g, ' '), // Replace underscores with spaces - confidence: confidence - }; - }).sort((a, b) => b.confidence - a.confidence); // Sort by confidence descending - }; +const processClassificationResult = (result: any) => { + if (!result || !Array.isArray(result) || result.length === 0) return []; + + // Get the first array (which contains the classification results) + const resultData = result[0]; + + // Check if resultData is an array of classification objects + if (!Array.isArray(resultData)) return []; + + return resultData.map((item: any, index: number) => { + return { + rank: index + 1, + agencyId: item.agency_id, + agencyName: item.agency_name?.replace(/_/g, ' ') || `Agency ${item.agency_id}`, + confidence: item.confidence || 0 + }; + }).sort((a, b) => b.confidence - a.confidence); // Sort by confidence descending +}; const processedResults = classificationResult ? 
processClassificationResult(classificationResult) : []; From ab4531a4a415ac3f8038863c3471231ea7e6e81f Mon Sep 17 00:00:00 2001 From: Thiru Dinesh Date: Thu, 31 Jul 2025 12:03:13 +0530 Subject: [PATCH 189/195] pushed changes --- src/model-training/model_trainer.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/model-training/model_trainer.py b/src/model-training/model_trainer.py index 8ed8b61e..bed7bc40 100644 --- a/src/model-training/model_trainer.py +++ b/src/model-training/model_trainer.py @@ -123,7 +123,6 @@ def create_training_progress_session(self): logger.info(f"Create training progress session response - {response.status_code} - {response.text}") # Check if request was successful - response.raise_for_status() logger.info("Training progress session created successfully") @@ -563,8 +562,8 @@ def train(self): trainer.update_training_progression_session( training_status=DEPLOYING_MODEL_PROGRESS_STATUS, training_message=DEPLOYING_MODEL_PROGRESS_MESSAGE, - progress_percentage=DEPLOYING_MODEL_PROGRESS_PERCENTAGE, - process_complete=False) + progress_percentage=100, + process_complete=True) except Exception as e: @@ -623,6 +622,13 @@ def deploy(self): response.raise_for_status() logger.info("Model deployment completed successfully") + + trainer.update_training_progression_session( + training_status=MODEL_TRAINED_AND_DEPLOYED_PROGRESS_STATUS, + training_message=MODEL_TRAINED_AND_DEPLOYED_PROGRESS_MESSAGE, + progress_percentage=MODEL_TRAINED_AND_DEPLOYED_PROGRESS_PERCENTAGE, + process_complete=True) + return response.json() From 4ec3f22190f5f2edf593f99f3b350c2782d8b5d7 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 31 Jul 2025 15:26:47 +0530 Subject: [PATCH 190/195] new training resluts added --- .../molecules/DataModelCard/index.tsx | 2 +- .../molecules/DataModelForm/index.tsx | 2 +- .../TrainingResults/TrainingResults.scss | 289 +++++++++++++----- .../molecules/TrainingResults/index.tsx | 152 ++++++--- GUI/src/pages/DataModels/index.tsx | 53 ++-- GUI/src/pages/Datasets/index.tsx | 6 +- GUI/src/pages/IntegratedAgencies/index.tsx | 4 +- GUI/src/pages/TestModel/index.tsx | 3 +- GUI/src/pages/ViewDataset/index.tsx | 137 ++++----- GUI/translations/en/common.json | 1 + 10 files changed, 427 insertions(+), 222 deletions(-) diff --git a/GUI/src/components/molecules/DataModelCard/index.tsx b/GUI/src/components/molecules/DataModelCard/index.tsx index 00e97fcc..c14b3064 100644 --- a/GUI/src/components/molecules/DataModelCard/index.tsx +++ b/GUI/src/components/molecules/DataModelCard/index.tsx @@ -146,7 +146,7 @@ const DataModelCard: FC> = ({ content: (
From 4ec3f22190f5f2edf593f99f3b350c2782d8b5d7 Mon Sep 17 00:00:00 2001
From: erangi-ar
Date: Thu, 31 Jul 2025 15:26:47 +0530
Subject: [PATCH 190/195] New training results added

---
 .../molecules/DataModelCard/index.tsx      |   2 +-
 .../molecules/DataModelForm/index.tsx      |   2 +-
 .../TrainingResults/TrainingResults.scss   | 289 +++++++++++++-----
 .../molecules/TrainingResults/index.tsx    | 152 ++++++---
 GUI/src/pages/DataModels/index.tsx         |  53 ++--
 GUI/src/pages/Datasets/index.tsx           |   6 +-
 GUI/src/pages/IntegratedAgencies/index.tsx |   4 +-
 GUI/src/pages/TestModel/index.tsx          |   3 +-
 GUI/src/pages/ViewDataset/index.tsx        | 137 ++++-----
 GUI/translations/en/common.json            |   1 +
 10 files changed, 427 insertions(+), 222 deletions(-)

diff --git a/GUI/src/components/molecules/DataModelCard/index.tsx b/GUI/src/components/molecules/DataModelCard/index.tsx
index 00e97fcc..c14b3064 100644
--- a/GUI/src/components/molecules/DataModelCard/index.tsx
+++ b/GUI/src/components/molecules/DataModelCard/index.tsx
@@ -146,7 +146,7 @@ const DataModelCard: FC> = ({
     content: (
{results ? ( - + ) : ( - {showTrainingResults && trainingResults && }
    {t('dataModels.dataModelForm.deploymentPlatform')}{' '} diff --git a/GUI/src/components/molecules/TrainingResults/TrainingResults.scss b/GUI/src/components/molecules/TrainingResults/TrainingResults.scss index 48e8adfc..ad1bf4b1 100644 --- a/GUI/src/components/molecules/TrainingResults/TrainingResults.scss +++ b/GUI/src/components/molecules/TrainingResults/TrainingResults.scss @@ -1,101 +1,224 @@ .results-wrapper { - padding: 1rem; - background-color: #fff; -} + padding: .5rem; -.best-model-header { - font-size: 1rem; - font-weight: 500; - margin-bottom: 1rem; - color: #4D4F5D -} + .best-model-header { + color: #2e7d32; + margin-bottom: 1rem; + font-size: 1.25rem; + font-weight: 600; + } -.section-title { - font-size: .9rem; - margin-bottom: 20px; - color: #4D4F5D; -} + .best-model-summary { + display: flex; + gap: 2rem; + margin-bottom: 1.5rem; + padding: 1rem; + background-color: #f8fff8; + border: 1px solid #4caf50; + border-radius: 8px; -.model-section { - margin-bottom: 1.5rem; -} + .summary-metric { + display: flex; + flex-direction: column; + align-items: center; -.model-name { - font-size: .9rem; - margin-bottom: 1rem; - margin-top: 1rem; - color: #4D4F5D -} + .metric-label { + font-size: 0.875rem; + color: #666; + margin-bottom: 0.25rem; + } -.metrics-grid { - display: flex; - flex-wrap: wrap; - gap: 16px; -} + .metric-value { + font-size: 1.125rem; + font-weight: 600; + color: #2e7d32; + } + } + } -.metric-card { - background-color: #f7f7f7; - border: 1px solid #ddd; - border-radius: 6px; - padding: 12px 16px; - width: 200px; - box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05); -} + .section-title { + margin-bottom: 1rem; + color: #333; + font-size: 1.125rem; + } -.metric-row { - display: flex; - justify-content: space-between; - margin-bottom: 0; - font-size: 14px; - width: auto; - padding: .5rem 0rem; -} + .model-section { + margin-bottom: 1.2rem; + border: 1px solid #e0e0e0; + border-radius: 8px; + padding: 1rem 2rem; + background-color: #fff; -.metric-label { - font-weight: 600; - color: #444; -} + &.best-model { + border-color: #4caf50; + background-color: #f8fff8; + } -.header-row { - display: flex; - justify-content: space-between; - margin-bottom: 1rem; -} + .model-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 1rem; -.header-classes { - width: 30%; - font-weight: 600; - color: #444; -} + .model-name { + display: flex; + align-items: center; + gap: 0.5rem; + margin: 0; + font-size: 1.125rem; + color: #333; -.header-metrics { - display: grid; - grid-template-columns: repeat(4, 7rem); - gap: 1rem; - width: 70%; - font-weight: 600; - color: #444; -} + .best-badge { + background-color: #4caf50; + color: white; + padding: 0.25rem 0.5rem; + border-radius: 4px; + font-size: 0.75rem; + font-weight: 500; + } + } -.hr-divider { - border: none; - border-top: 1px solid #D1D1D1; - margin: 0; -} + .model-summary-stats { + display: flex; + gap: 1rem; + font-size: 0.875rem; + color: #666; -.metric-class { - width: 30%; -} + span { + white-space: nowrap; + } + } + } + + .model-metrics-card { + border: 1px solid #ddd; + border-radius: 6px; + overflow: hidden; + margin-bottom: 1rem; + + .header-row { + display: flex; + background-color: #f5f5f5; + padding: 0.75rem; + font-weight: 600; + color: #333; + + .header-classes { + flex: 1; + } + + .header-metrics { + display: flex; + gap: 2rem; + min-width: 200px; -.metric-values { - display: grid; - grid-template-columns: repeat(4, 7rem); - gap: 1rem; - width: 70%; + div { + text-align: center; + 
flex: 1; + } + } + } + + .hr-divider { + margin: 0; + border: none; + border-top: 1px solid #ddd; + } + + .metric-row { + display: flex; + padding: 0.75rem; + border-bottom: 1px solid #f0f0f0; + + &:last-child { + border-bottom: none; + } + + &.average-row { + background-color: #f9f9f9; + font-weight: 600; + + .average-label { + color: #555; + } + + .metric-value.average { + color: #2e7d32; + } + } + + .metric-class { + flex: 1; + color: #333; + } + + .metric-values { + display: flex; + gap: 2rem; + min-width: 200px; + + .metric-value { + text-align: center; + flex: 1; + color: #666; + + &.f1 { + color: #1976d2; + } + + &.accuracy { + color: #388e3c; + } + } + } + } + } + + .model-details { + display: flex; + gap: 2rem; + flex-wrap: wrap; + + .detail-item { + display: flex; + gap: 0.5rem; + + .detail-label { + font-weight: 500; + color: #666; + } + + .detail-value { + color: #333; + } + } + } + } } -.model-metrics-card { - border: 1px solid #E6E6E6; - padding: .5rem; - border-radius: 5px; +@media (max-width: 768px) { + .results-wrapper { + .best-model-summary { + flex-direction: column; + gap: 1rem; + } + + .model-section { + .model-header { + flex-direction: column; + align-items: flex-start; + gap: 0.5rem; + + .model-summary-stats { + flex-direction: column; + gap: 0.25rem; + } + } + + .model-details { + flex-direction: column; + gap: 0.5rem; + } + } + } } \ No newline at end of file diff --git a/GUI/src/components/molecules/TrainingResults/index.tsx b/GUI/src/components/molecules/TrainingResults/index.tsx index 800a040d..e5c32849 100644 --- a/GUI/src/components/molecules/TrainingResults/index.tsx +++ b/GUI/src/components/molecules/TrainingResults/index.tsx @@ -1,45 +1,123 @@ import { ModelResultsProps } from 'types/dataModels'; import './TrainingResults.scss'; -const ModelResults: React.FC = ({ models }) => { +interface TrainingResultModel { + avg_f1: number; + avg_accuracy: number; + combined_score: number; + metrics: [string[], number[], number[]]; // [classes, accuracies, f1_scores] + variant: { + name: string; + type: string; + base_model: string; + ood_method: string; + full_model_name: string; + confidence_scaling: boolean; + uncertainty_strategy: string; + human_handoff_threshold: number; + }; + model_path: string; +} + +const ModelResults: React.FC<{ models: TrainingResultModel[] }> = ({ models }) => { + console.log(models); + + // Find best performing model by avg_f1 + const bestModel = models?.reduce((best, current) => + current.avg_f1 > best.avg_f1 ? current : best + ); + + // Sort models by avg_f1 in descending order + const sortedModels = models?.sort((a, b) => b.avg_f1 - a.avg_f1) || []; + + // Check if there are multiple models + const hasMultipleModels = models && models.length > 1; + + const formatScore = (score: number) => (score * 100).toFixed(2) + '%'; + + const processModelMetrics = (model: TrainingResultModel) => { + const [classes, accuracies, f1Scores] = model.metrics; + + return classes.map((className, index) => ({ + className: className.replace(/_/g, ' '), + accuracy: accuracies[index], + f1: f1Scores[index] + })); + }; + + if (!models || models.length === 0) { return ( -
  return (
-    <div className="results-wrapper">
-      <p className="best-model-header">
-        Best Performing Model - {models?.[0]?.model_type || "N/A"}
-      </p>
-
-      <h3 className="section-title">Training Results</h3>
-
-      {models?.map((model, idx) => (
-        <div key={idx} className="model-section">
-          <h4 className="model-name">{model.model_type}</h4>
-
-          <div className="model-metrics-card">
-            <div className="header-row">
-              <div className="header-classes">Classes</div>
-              <div className="header-metrics">
-                <div>F1</div>
-                <div>Recall</div>
-                <div>Accuracy</div>
-                <div>Precision</div>
-              </div>
-            </div>
-            <hr className="hr-divider" />
-            {Object.entries(model?.class_metrics).map(([className, metrics]) => (
-              <div key={className} className="metric-row">
-                <div className="metric-class">{className}</div>
-                <div className="metric-values">
-                  <div className="metric-value">{metrics.f1}</div>
-                  <div className="metric-value">{metrics.recall}</div>
-                  <div className="metric-value">{metrics.accuracy}</div>
-                  <div className="metric-value">{metrics.precision}</div>
-                </div>
-              </div>
-            ))}
-          </div>
-        </div>
-      ))}
-    </div>
-  );
+      <div className="results-wrapper">
+        <p>No training results available</p>
+      </div>
+    );
+  }
+
+  return (
+    <div className="results-wrapper">
+      {hasMultipleModels && (
+        <>
+          <p className="best-model-header">
+            Best Performing Model - {bestModel?.variant?.name || "N/A"}
+          </p>
+          <div className="best-model-summary">
+            <div className="summary-metric">
+              <span className="metric-label">Average F1:</span>
+              <span className="metric-value">{formatScore(bestModel?.avg_f1 || 0)}</span>
+            </div>
+            <div className="summary-metric">
+              <span className="metric-label">Average Accuracy:</span>
+              <span className="metric-value">{formatScore(bestModel?.avg_accuracy || 0)}</span>
+            </div>
+            <div className="summary-metric">
+              <span className="metric-label">Combined Score:</span>
+              <span className="metric-value">{formatScore(bestModel?.combined_score || 0)}</span>
+            </div>
+          </div>
+        </>
+      )}
+
+      <h3 className="section-title">Training Results</h3>
+
+      {sortedModels.map((model, idx) => {
+        const processedMetrics = processModelMetrics(model);
+        const isBest = hasMultipleModels && model === bestModel;
+
+        return (
+          <div key={idx} className={`model-section ${isBest ? 'best-model' : ''}`}>
+            <div className="model-header">
+              <h4 className="model-name">
+                {model.variant.name}
+                {/* Only show best badge when there are multiple models */}
+                {hasMultipleModels && isBest && <span className="best-badge">Best</span>}
+              </h4>
+            </div>
+
+            <div className="model-metrics-card">
+              <div className="header-row">
+                <div className="header-classes">Classes</div>
+                <div className="header-metrics">
+                  <div>F1 Score</div>
+                  <div>Accuracy</div>
+                </div>
+              </div>
+              <hr className="hr-divider" />
+
+              {processedMetrics.map((metric, metricIdx) => (
+                <div key={metricIdx} className="metric-row">
+                  <div className="metric-class">{metric.className}</div>
+                  <div className="metric-values">
+                    <div className="metric-value f1">{formatScore(metric.f1)}</div>
+                    <div className="metric-value accuracy">{formatScore(metric.accuracy)}</div>
+                  </div>
+                </div>
+              ))}
+            </div>
+          </div>
+        );
+      })}
+    </div>
+  );
+};

 export default ModelResults;
\ No newline at end of file
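To make the new metrics layout concrete: metrics is a tuple of three parallel arrays, [classes, accuracies, f1_scores], which processModelMetrics zips into per-class rows. The class names and numbers below are invented for illustration:

// Invented sample of one model's metrics field.
const exampleMetrics: [string[], number[], number[]] = [
  ['residence_permit', 'identity_documents'],
  [0.91, 0.88],   // per-class accuracy
  [0.89, 0.86],   // per-class F1
];
// processModelMetrics would produce:
// [{ className: 'residence permit', accuracy: 0.91, f1: 0.89 },
//  { className: 'identity documents', accuracy: 0.88, f1: 0.86 }]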
diff --git a/GUI/src/pages/DataModels/index.tsx b/GUI/src/pages/DataModels/index.tsx
index 249b8e5f..3ea98ac7 100644
--- a/GUI/src/pages/DataModels/index.tsx
+++ b/GUI/src/pages/DataModels/index.tsx
@@ -41,7 +41,7 @@ const DataModels: FC = () => {
     queryFn: () => getDataModelsOverview(pageIndex, filters.modelStatus, filters.trainingStatus, filters.deploymentEnvironment, filters.sort),
   });

-  const { data: prodDataModel, isLoading: isProdDataModelLoading } = useQuery({
+  const { data: prodDataModel, isLoading: isProdDataModelLoading } = useQuery({
     queryKey: dataModelsQueryKeys.GET_PROD_DATA_MODEL(),
     queryFn: () => getProductionDataModel(),
   });

@@ -175,7 +175,7 @@ const DataModels: FC = () => {
-  {prodDataModel != null &&
    + {prodDataModel != null &&

    Deployed Model

    { trainingStatus={prodDataModel.trainingStatus} modelStatus={prodDataModel?.modelStatus} deploymentEnv={prodDataModel?.deploymentEnv} - results={prodDataModel?.trainingResults ?? null} + results={prodDataModel?.trainingResults ?? null} />
    } -

    Other Data Models

    {dataModelsData?.length > 0 ? ( -
    - {dataModelsData?.map( - (model: DataModelResponse, index: number) => { - return ( - - ); - } - )} +

    Other Data Models

    +
    + + {dataModelsData?.map( + (model: DataModelResponse, index: number) => { + return ( + + ); + } + )} +
    + ) : ( )} diff --git a/GUI/src/pages/Datasets/index.tsx b/GUI/src/pages/Datasets/index.tsx index d003c8dd..e7ed28a8 100644 --- a/GUI/src/pages/Datasets/index.tsx +++ b/GUI/src/pages/Datasets/index.tsx @@ -21,7 +21,6 @@ const Datasets: FC = () => { const [sortOption, setSortOption] = useState("created_at desc"); const [searchTerm, setSearchTerm] = useState('all'); - const { data: datasets, isLoading } = useQuery({ queryKey: datasetQueryKeys.DATASET_OVERVIEW(pageIndex, sortOption), queryFn: () => getDatasetsOverview(pageIndex, sortOption), @@ -29,7 +28,6 @@ const Datasets: FC = () => { const pageCount = datasets?.[0]?.totalPages ?? 1; - const handleSearch = (term: string) => { // Set the search term to 'all' if empty, otherwise use the provided term setSearchTerm(term.trim() === '' ? 'all' : term); @@ -113,8 +111,8 @@ const Datasets: FC = () => {
    )} - {!isLoading && datasets?.response?.data?.length === 0 && ( - + {!isLoading && datasets?.length === 0 && ( + )} {
    ) } - {!isLoading && agencies?.response?.data?.length===0 && ( - + {!isLoading && agencies?.length===0 && ( + )} {
    diff --git a/GUI/src/pages/ViewDataset/index.tsx b/GUI/src/pages/ViewDataset/index.tsx index 2c49f6f0..9f1c647a 100644 --- a/GUI/src/pages/ViewDataset/index.tsx +++ b/GUI/src/pages/ViewDataset/index.tsx @@ -36,13 +36,19 @@ const ViewDataset = () => { const [editedRows, setEditedRows] = useState([]); const [selectedAgencyId, setSelectedAgencyId] = useState("all"); const [originalDataset, setOriginalDataset] = useState([]); + const [updatePayload, setUpdatePayload] = useState<{ + updatedDataItems: SelectedRowPayload[]; + deletedRows: (string | number)[]; + updatedRowsLength: number; + deletedRowsLength: number; + } | null>(null); const navigate = useNavigate(); - const { data: metadata, isLoading: isMetadataLoading } = useQuery({ + const { data: metadata, isLoading: isMetadataLoading,refetch: refetchMetadata } = useQuery({ queryKey: datasetQueryKeys.GET_META_DATA(datasetVersionId ?? 0), queryFn: () => getDatasetMetadata(datasetVersionId ?? 0), }); - const { data: dataset, isLoading: datasetIsLoading } = useQuery({ + const { data: dataset, isLoading: datasetIsLoading, refetch: refetchDataset } = useQuery({ queryKey: datasetQueryKeys.GET_DATA_SETS( datasetVersionId ?? 0, selectedAgencyId, @@ -228,66 +234,48 @@ const ViewDataset = () => { const updateMutation = useMutation({ mutationFn: updateDataset, - onSuccess: () => { - - // Set a 3-second timeout to show the loading message + onSuccess: async () => { setTimeout(() => { - - // Show the success dialog after 3 seconds - // open({ - // title: t('datasets.detailedView.datasetUpdateSuccessfulTitle'), - // content: t('datasets.detailedView.datasetUpdateSuccessfulDesc'), - // footer: ( - //
    - // - //
    - // ) - // }); setIsUpdating(false); setIsProgressModalOpen(false); setEditedRows([]); setDeletedRowIds([]); - }, 3000); // 3 seconds delay - - + }, 3000); + await Promise.all([ + refetchMetadata(), + refetchDataset() + ]); }, onError: () => { setIsUpdating(false); setIsProgressModalOpen(false); setEditedRows([]); - setDeletedRowIds([]); open({ + setDeletedRowIds([]); + open({ title: t('datasets.detailedView.datasetUpdateUnsuccessfulTitle'), content: t('datasets.detailedView.datasetUpdateUnsuccessfulDesc'), }); }, }); - let payload: { - updatedDataItems: SelectedRowPayload[]; - deletedRows: (string | number)[]; - updatedRowsLength: number; - deletedRowsLength: number; - } = { updatedDataItems: [], deletedRows: [], updatedRowsLength: 0, deletedRowsLength: 0 }; - - const minorUpdate = () => { + + const minorUpdate = () => { setIsProgressModalOpen(true); + // Create payload inside the function const updatedDataItems: SelectedRowPayload[] = editedRows.filter((row) => { return !deletedRowIds.includes(row.itemId); }); - payload = { + const updatePayload = { updatedDataItems, deletedRows: deletedRowIds, updatedRowsLength: updatedDataItems.length, deletedRowsLength: deletedRowIds.length, }; + // Store payload in a ref or state if you need to access it in the dialog + setUpdatePayload(updatePayload); }; const handleDeleteDataset = () => { @@ -442,42 +430,55 @@ const ViewDataset = () => { /> )} - {isProgressModalOpen && ( + {isProgressModalOpen && ( setIsUpdateModalOpen(false)} - isOpen={ - isProgressModalOpen + title={t('datasets.detailedView.confirmUpdateDatasetTitle')} + onClose={() => setIsProgressModalOpen(false)} + isOpen={isProgressModalOpen} + footer={ +
    + + +
    } - footer={
    - - -
    } - > - {isUpdating ?
    -

    {t('datasets.detailedView.dataBeingUpdated')}

    -
    -
    :

    {t('datasets.detailedView.editDataRowDesc')}

    } - + > + {isUpdating ? ( +
    +

    {t('datasets.detailedView.dataBeingUpdated')}

    +
    +
    +
    +
    + ) : ( +
    +

    {t('datasets.detailedView.confirmUpdateDatasetDesc')}

    + {updatePayload && ( +
    +

    Items to update: {updatePayload.updatedRowsLength}

    +

    Items to delete: {updatePayload.deletedRowsLength}

    +
    + )} +
    + )}
    )}
    diff --git a/GUI/translations/en/common.json b/GUI/translations/en/common.json index a23e1b21..8bfc8b8b 100644 --- a/GUI/translations/en/common.json +++ b/GUI/translations/en/common.json @@ -116,6 +116,7 @@ "integratedAgencies": { "title": "Integrated Clients", "search": "Search client", + "noClients": "No clients found", "sortOptions": { "agencyAsc": "Client Name: A-Z", "agencyDesc": "Client Name: Z-A", From e34a025a38881c35f5f1564d8fc198e666bf1115 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 31 Jul 2025 15:44:56 +0530 Subject: [PATCH 191/195] Remove sample data inserts from mock global classifier SQL script --- .../mock-global-classifier-script-v7-centops-ckb.sql | 8 -------- 1 file changed, 8 deletions(-) diff --git a/DSL/Liquibase/changelog/mock-global-classifier-script-v7-centops-ckb.sql b/DSL/Liquibase/changelog/mock-global-classifier-script-v7-centops-ckb.sql index 9825a2a6..1668065e 100644 --- a/DSL/Liquibase/changelog/mock-global-classifier-script-v7-centops-ckb.sql +++ b/DSL/Liquibase/changelog/mock-global-classifier-script-v7-centops-ckb.sql @@ -14,11 +14,3 @@ CREATE TABLE public.mock_ckb ( created_at TIMESTAMP NOT NULL DEFAULT NOW() ); -INSERT INTO public.mock_centops (agency_id, agency_name, created_at) VALUES - ('1', 'ID.ee', NOW()), - ('2', 'Politsei-_ja_Piirivalveamet', NOW()); - -INSERT INTO public.mock_ckb (agency_id, agency_data_hash, data_url, created_at) VALUES - ('1', 'id_hash', 'http://minio:9000/ckb/agencies/ID.ee/ID.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250704%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250704T044232Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=58fbc63dba44a5fc0a55cad67b24665bf22a9bf20ebd98484e5ca9895db76c24', NOW()), - ('2', 'Politsei_hash', 'http://minio:9000/ckb/agencies/Politsei-_ja_Piirivalveamet/Politsei-_ja_Piirivalveamet.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20250704%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250704T044232Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=ab69f524ae6a0ba1e64ccaba0b355b4944f6216117d589e6f987b3f4679b0e06', NOW()); - From 23b73d614f92218f8c5d683a726519b8db30a58d Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 31 Jul 2025 16:56:37 +0530 Subject: [PATCH 192/195] Add loading indicator to Load Model button in TestModel component --- GUI/src/pages/TestModel/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GUI/src/pages/TestModel/index.tsx b/GUI/src/pages/TestModel/index.tsx index b19c4e7a..2f980dfd 100644 --- a/GUI/src/pages/TestModel/index.tsx +++ b/GUI/src/pages/TestModel/index.tsx @@ -115,7 +115,7 @@ const processClassificationResult = (result: any) => { }} value={testModel?.modelId === null ? t('testModels.errors.modelNotExist') : undefined} defaultValue={testModel?.modelId ?? undefined} /> -
    {modelLoadingStatus}
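The Button markup in this hunk was lost along with the other stripped JSX, so the change can only be sketched. Assuming names not confirmed by the patch (isLoading, onLoad), the pattern being added is a button whose label and disabled state track the model-load request:

// Rough sketch only; the real component and prop names differ.
const LoadModelButton: React.FC<{ isLoading: boolean; label: string; onLoad: () => void }> = ({
  isLoading,
  label,
  onLoad,
}) => (
  <button disabled={isLoading} onClick={onLoad}>
    {isLoading ? 'Loading...' : label}
  </button>
);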
    From 46e632e41c291d71bfba0f2b0ff905040ee0c443 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Mon, 4 Aug 2025 14:10:30 +0530 Subject: [PATCH 193/195] page size selector added --- GUI/src/components/DataTable/DataTable.scss | 5 +++ GUI/src/components/DataTable/index.tsx | 38 +++++++++++++++++++-- GUI/src/pages/ViewDataset/index.tsx | 12 ++++--- GUI/src/services/datasets.ts | 5 +-- GUI/src/utils/queryKeys.ts | 2 +- GUI/translations/en/common.json | 4 ++- 6 files changed, 56 insertions(+), 10 deletions(-) diff --git a/GUI/src/components/DataTable/DataTable.scss b/GUI/src/components/DataTable/DataTable.scss index fc844ad8..619ae588 100644 --- a/GUI/src/components/DataTable/DataTable.scss +++ b/GUI/src/components/DataTable/DataTable.scss @@ -21,6 +21,11 @@ border: solid 1px get-color(black-coral-1); } + &__page-size-selector { + display: flex; + gap: .7rem; + } + thead, tbody { width: 100%; diff --git a/GUI/src/components/DataTable/index.tsx b/GUI/src/components/DataTable/index.tsx index 5673a6ad..9a83f4a1 100644 --- a/GUI/src/components/DataTable/index.tsx +++ b/GUI/src/components/DataTable/index.tsx @@ -52,7 +52,9 @@ type DataTableProps = { pagesCount?: number; meta?: TableMeta; dropdownFilters?: DropdownFilterConfig[]; - onSelect?: (value: string | number) => void | undefined// Callback for dropdown filter selection + onSelect?: (value: string | number) => void | undefined + showPageSizeSelector?: boolean; + pageSizeOptions?: number[]; }; @@ -116,7 +118,9 @@ const DataTable: FC = ( pagesCount, meta, dropdownFilters, - onSelect + onSelect, + showPageSizeSelector = false, + pageSizeOptions = [10, 20, 50, 100] }, ) => { const id = useId(); @@ -157,6 +161,15 @@ const DataTable: FC = ( pageCount: isClientSide ? undefined : pagesCount, }); + const handlePageSizeChange = (newPageSize: number) => { + if (setPagination && pagination) { + setPagination({ + pageIndex: 0, + pageSize: newPageSize, + }); + } + }; + return (
    @@ -221,6 +234,27 @@ const DataTable: FC = (
    {pagination && (
    + {showPageSizeSelector && ( +
    + + {t('global.showEntries') || 'Show'} + + + + {t('global.entries') || 'entries'} + +
    + )} {(table.getPageCount() * table.getState().pagination.pageSize) > table.getState().pagination.pageSize && (
    + +
    + ), + }); + }; + useEffect(() => { if (dataset) { setOriginalDataset(prev => { @@ -125,7 +182,8 @@ const ViewDataset = () => { onClick={() => { open({ title: t('datasets.detailedView.deleteDataRowTitle') ?? '', - content:

    {t('datasets.detailedView.deleteDataRowDesc')}

    , + content:

    {t('datasets.detailedView.deleteDataRowDesc')}

    + {t('datasets.detailedView.bulkDeleteDesc2')}
    , footer: (
    )}
    + {/* Bulk actions */} + {selectedRowsCount > 0 && ( +
    + + {selectedRowsCount} {t('datasets.detailedView.itemsSelected') || 'items selected'} + + +
    + )}
    {datasetIsLoading && } {!datasetIsLoading && updatedDataset && updatedDataset?.length > 0 && ( @@ -360,6 +456,8 @@ const ViewDataset = () => { data={updatedDataset} columns={dataColumns as ColumnDef[]} pagination={pagination} + rowSelection={rowSelection} + setRowSelection={setRowSelection} dropdownFilters={[ { columnId: 'agencyName', @@ -384,7 +482,7 @@ const ViewDataset = () => { state.pageIndex === pagination.pageIndex && state.pageSize === pagination.pageSize ) - return; + return; setPagination(state); }} pagesCount={dataset?.[0]?.totalPages ?? 0} diff --git a/GUI/translations/en/common.json b/GUI/translations/en/common.json index 1f520212..a06f718e 100644 --- a/GUI/translations/en/common.json +++ b/GUI/translations/en/common.json @@ -49,14 +49,15 @@ "failed": "Failed", "sessionTimeOutTitle": "You session has been ended!", "sessionTimeOutDesc": "Extend your session or sign out from application in {{seconds}}", - "close":"Close", - "proceed":"Proceed", - "maxFileSize":"File size should not exceed 20 MB.", + "close": "Close", + "proceed": "Proceed", + "maxFileSize": "File size should not exceed 20 MB.", "select": "-Select-", "replace": "Replace", "clearFilters": "Clear Filters", "showEntries": "Show", - "entries": "records" + "entries": "records", + "deleteSelected": "Delete Selection" }, "menu": { "userManagement": "User Management", @@ -215,7 +216,7 @@ "editDataRowTitle": "Edit Data Record", "editDataRowDesc": "Updates you make to the data record will be saved in the dataset", "deleteDataRowTitle": "Delete Data Record", - "deleteDataRowDesc": "Deleted data records will be removed from the dataset", + "deleteDataRowDesc": "Are you sure you want to delete this data record?", "data": "Data", "clientName": "Client Name", "patchUpdateBanner": "You have edited individual items in the dataset which are not saved. Please save the changes to apply", @@ -238,6 +239,11 @@ "itemsUpdated": "items updated", "itemsDeleted": "items deleted", "dataBeingUpdated": "Data is being updated...", + "itemsSelected": "items selected", + "bulkDeleteTitle": "Delete Selected Items", + "bulkDeleteDesc1": "Are you sure you want to delete the selected items?", + "bulkDeleteDesc2": "Note : This deletion will not be affected in the original dataset until you click on Save Changes.", + "bulkDeleteSuccessTitle": "Items Deleted Successfully", "table": { "id": "Item ID", "data": "Data", @@ -467,7 +473,6 @@ "topPrediction": "Top Prediction", "allPredictions": "All Predictions", "classificationFailed": "Classification failed. Please try again." 
- }, "optionLists": { "text": "Text", From c1c766c0c2a59281b80a2c390c393b7d95b9a019 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Thu, 14 Aug 2025 14:47:38 +0530 Subject: [PATCH 195/195] resolve pr comments --- GUI/src/components/DataTable/index.tsx | 4 ++-- GUI/src/pages/ViewDataset/index.tsx | 2 +- GUI/src/utils/queryKeys.ts | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/GUI/src/components/DataTable/index.tsx b/GUI/src/components/DataTable/index.tsx index 2a7423d9..b01e0630 100644 --- a/GUI/src/components/DataTable/index.tsx +++ b/GUI/src/components/DataTable/index.tsx @@ -123,7 +123,7 @@ const DataTable: FC = ( onSelect, showPageSizeSelector = false, pageSizeOptions = [10, 20, 50, 100], - rowSelection, + rowSelection, setRowSelection, }, ) => { @@ -149,7 +149,7 @@ const table = useReactTable({ onGlobalFilterChange: setGlobalFilter, onColumnVisibilityChange: setColumnVisibility, globalFilterFn: fuzzyFilter, - enableRowSelection: true, + enableRowSelection: !!setRowSelection, onRowSelectionChange: setRowSelection ? (updaterOrValue) => { if (typeof updaterOrValue === 'function') { diff --git a/GUI/src/pages/ViewDataset/index.tsx b/GUI/src/pages/ViewDataset/index.tsx index 6c466932..03bc6548 100644 --- a/GUI/src/pages/ViewDataset/index.tsx +++ b/GUI/src/pages/ViewDataset/index.tsx @@ -86,7 +86,7 @@ const ViewDataset = () => { content: (

    {t('datasets.detailedView.bulkDeleteDesc1') || 'Are you sure you want to delete the selected items?'}

    -

    {t('datasets.detailedView.bulkDeleteDesc2') || 'items selected'}

    +

    {t('datasets.detailedView.bulkDeleteDesc2') || 'Note : This deletion will not be affected in the original dataset until you click on Save Changes.'}

    ), footer: ( diff --git a/GUI/src/utils/queryKeys.ts b/GUI/src/utils/queryKeys.ts index 8dc022f0..27affe50 100644 --- a/GUI/src/utils/queryKeys.ts +++ b/GUI/src/utils/queryKeys.ts @@ -44,7 +44,7 @@ export const datasetQueryKeys = { ); }, GET_DATA_SETS: function (datasetId?: number|string, agencyId?:number|string, pageNum?: number, pageSize?: number) { - return ['datasets/data', datasetId, agencyId,pageNum].filter( + return ['datasets/data', datasetId, agencyId, pageNum, pageSize].filter( (val) => val !== undefined ); },
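The queryKeys change is the substantive fix here: react-query identifies a cache entry by every element of its key, so before this change two different page sizes shared one cached page. With invented argument values:

// After the fix, pageSize participates in cache identity:
datasetQueryKeys.GET_DATA_SETS(42, 'all', 0, 10);
// -> ['datasets/data', 42, 'all', 0, 10]
datasetQueryKeys.GET_DATA_SETS(42, 'all', 0, 50);
// -> ['datasets/data', 42, 'all', 0, 50]  (distinct key, so the data is refetched)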