From e02aca560cc1564e7ad1fdb6b5e2da2c62fe35f0 Mon Sep 17 00:00:00 2001 From: adish-rmr Date: Sun, 8 Feb 2026 14:31:50 +0100 Subject: [PATCH] refactoring and cleaning --- .gitignore | 4 + CLAUDE.md | 126 +++ README.md | 0 data/jsonschema.json | 57 -- data/log_readme.md | 28 - {docs => data}/logiche.md | 0 data/{ => old_data}/clean_response_full.csv | 0 .../{ => old_data}/clean_responses_shrunk.csv | 0 .../echa-cosing-scraping-log.csv | 0 .../echa-reach-scraping-log.csv | 0 data/{ => old_data}/echa_full_scraping.csv | 0 data/{ => old_data}/exploded_cas_cosing.csv | 0 data/output.pdf | Bin 199866 -> 0 bytes data/pif_schema.json | 38 - data/summary.html | 1 - docs/REFACTORING.md | 211 ---- docs/test_summary.md | 295 ------ docs/testing_guide.md | 767 -------------- docs/user_journey.md | 6 - main.db | Bin 12288 -> 0 bytes main.db.wal | Bin 10338 -> 0 bytes marimo/database_creation.py | 158 --- marimo/parsing_echa.py | 194 ++++ marimo/test_obj.py | 49 + marimo/worflow.py | 412 ++++++++ old/_old/echaFind.py | 245 ----- old/_old/echaProcess.py | 946 ----------------- old/_old/find.py | 497 --------- old/_old/mongo_functions.py | 37 - old/_old/pubchem.py | 149 --- old/_old/scraper_cosing.py | 182 ---- old/echa_find.py | 223 ----- old/echa_pdf.py | 477 --------- old/echa_process.py | 947 ------------------ old/pdf_extraction.py | 467 --------- old/resources/ECHA_Logo.svg | 3 - old/resources/echa_chem_logo.svg | 141 --- old/resources/injectableHeader.html | 184 ---- pyproject.toml | 1 + pytest.ini | 28 - src/pif_compiler/classes/models.py | 482 +++++++-- src/pif_compiler/classes/pif_class.py | 36 - src/pif_compiler/classes/types_enum.py | 145 --- src/pif_compiler/functions/db_utils.py | 33 +- src/pif_compiler/functions/orders.py | 6 - src/pif_compiler/services/srv_cosing.py | 270 ++--- src/pif_compiler/services/srv_echa.py | 117 ++- tests/README.md | 220 ---- tests/RUN_TESTS.md | 86 -- tests/__init__.py | 3 - tests/conftest.py | 247 ----- tests/test_cosing_service.py | 254 
----- tests/test_echa_find.py | 857 ---------------- utils/README.md | 153 --- utils/changelog.py | 140 --- utils/create_user.py | 86 -- utils/docker-compose.yml | 28 - uv.lock | 13 + 58 files changed, 1439 insertions(+), 8610 deletions(-) create mode 100644 CLAUDE.md delete mode 100644 README.md delete mode 100644 data/jsonschema.json delete mode 100644 data/log_readme.md rename {docs => data}/logiche.md (100%) rename data/{ => old_data}/clean_response_full.csv (100%) rename data/{ => old_data}/clean_responses_shrunk.csv (100%) rename data/{ => old_data}/echa-cosing-scraping-log.csv (100%) rename data/{ => old_data}/echa-reach-scraping-log.csv (100%) rename data/{ => old_data}/echa_full_scraping.csv (100%) rename data/{ => old_data}/exploded_cas_cosing.csv (100%) delete mode 100644 data/output.pdf delete mode 100644 data/pif_schema.json delete mode 100644 data/summary.html delete mode 100644 docs/REFACTORING.md delete mode 100644 docs/test_summary.md delete mode 100644 docs/testing_guide.md delete mode 100644 docs/user_journey.md delete mode 100644 main.db delete mode 100644 main.db.wal delete mode 100644 marimo/database_creation.py create mode 100644 marimo/parsing_echa.py create mode 100644 marimo/test_obj.py create mode 100644 marimo/worflow.py delete mode 100644 old/_old/echaFind.py delete mode 100644 old/_old/echaProcess.py delete mode 100644 old/_old/find.py delete mode 100644 old/_old/mongo_functions.py delete mode 100644 old/_old/pubchem.py delete mode 100644 old/_old/scraper_cosing.py delete mode 100644 old/echa_find.py delete mode 100644 old/echa_pdf.py delete mode 100644 old/echa_process.py delete mode 100644 old/pdf_extraction.py delete mode 100644 old/resources/ECHA_Logo.svg delete mode 100644 old/resources/echa_chem_logo.svg delete mode 100644 old/resources/injectableHeader.html delete mode 100644 pytest.ini delete mode 100644 src/pif_compiler/classes/pif_class.py delete mode 100644 src/pif_compiler/classes/types_enum.py delete mode 100644 
src/pif_compiler/functions/orders.py delete mode 100644 tests/README.md delete mode 100644 tests/RUN_TESTS.md delete mode 100644 tests/__init__.py delete mode 100644 tests/conftest.py delete mode 100644 tests/test_cosing_service.py delete mode 100644 tests/test_echa_find.py delete mode 100644 utils/README.md delete mode 100644 utils/changelog.py delete mode 100644 utils/create_user.py delete mode 100644 utils/docker-compose.yml diff --git a/.gitignore b/.gitignore index b7faf40..3f72cd3 100644 --- a/.gitignore +++ b/.gitignore @@ -205,3 +205,7 @@ cython_debug/ marimo/_static/ marimo/_lsp/ __marimo__/ + +# other + +pdfs/ \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..d60a399 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,126 @@ +# PIF Compiler - Comsoguard API + +## Project Overview + +**PIF Compiler** (branded as **Comsoguard API**) is a cosmetic safety assessment tool that compiles Product Information Files (PIF) for cosmetic products, as required by EU regulations. It aggregates toxicological, regulatory, and chemical data from multiple external sources to evaluate ingredient safety (DAP calculations, SED/MoS computations, NOAEL extraction). + +The primary language is **Italian** (variable names, comments, some log messages). Code is written in **Python 3.12**. 
+ +## Tech Stack + +- **Framework**: FastAPI + Uvicorn +- **Package manager**: uv (with `pyproject.toml` + `uv.lock`) +- **Data models**: Pydantic v2 +- **Databases**: MongoDB (substance data cache via `pymongo`) + PostgreSQL (product presets, search logs, compilers via `psycopg2`) +- **Web scraping**: Playwright (PDF generation, browser automation), BeautifulSoup4 (HTML parsing) +- **External APIs**: PubChem (`pubchempy` + `pubchemprops`), COSING (EU Search API), ECHA (chem.echa.europa.eu) +- **Logging**: Python `logging` with rotating file handlers (`logs/debug.log`, `logs/error.log`) + +## Project Structure + +``` +src/pif_compiler/ +├── main.py # FastAPI app, middleware, exception handlers, routers +├── __init__.py +├── api/ +│ └── routes/ +│ ├── api_echa.py # ECHA endpoints (single + batch search) +│ ├── api_cosing.py # COSING endpoints (single + batch search) +│ └── common.py # PDF generation, PubChem, CIR search endpoints +├── classes/ +│ └── models.py # Pydantic models: Ingredient, DapInfo, CosingInfo, +│ # ToxIndicator, Toxicity, Esposition, RetentionFactors +├── functions/ +│ ├── common_func.py # PDF generation with Playwright +│ ├── common_log.py # Centralized logging configuration +│ └── db_utils.py # MongoDB + PostgreSQL connection helpers +└── services/ + ├── srv_echa.py # ECHA scraping, HTML parsing, toxicology extraction, + │ # orchestrator (validate -> check cache -> fetch -> store) + ├── srv_cosing.py # COSING API search + data cleaning + ├── srv_pubchem.py # PubChem property extraction (DAP data) + └── srv_cir.py # CIR (Cosmetic Ingredient Review) database search +``` + +### Other directories + +- `data/` - Input data files (`input.json` with sample INCI/CAS/percentage lists), old CSV data +- `logs/` - Rotating log files (debug.log, error.log) - auto-generated +- `pdfs/` - Generated PDF files from ECHA dossier pages +- `marimo/` - **Ignore this folder.** Debug/test notebooks, not part of the main application + +## Architecture & Data Flow + +### 
Core workflow (per ingredient) +1. **Input**: CAS number (and optionally INCI name + percentage) +2. **COSING** (`srv_cosing.py`): Search EU cosmetic ingredients database for regulatory restrictions, INCI match, annex references +3. **ECHA** (`srv_echa.py`): Search substance -> get dossier -> parse HTML index -> extract toxicological data (NOAEL, LD50, LOAEL) from acute & repeated dose toxicity pages +4. **PubChem** (`srv_pubchem.py`): Get molecular weight, XLogP, TPSA, melting point, dissociation constants +5. **DAP calculation** (`DapInfo` model): Dermal Absorption Percentage based on molecular properties (MW > 500, LogP, TPSA > 120, etc.) +6. **Toxicity ranking** (`Toxicity` model): Best toxicological indicator selection with priority (NOAEL > LOAEL > LD50) and safety factors + +### Caching strategy +- ECHA results are cached in MongoDB (`toxinfo.substance_index` collection) keyed by `substance.rmlCas` +- The orchestrator checks local cache before making external requests +- Search history is logged to PostgreSQL (`logs.search_history` table) + +## API Endpoints + +All routes are under `/api/v1`: + +| Method | Path | Description | +|--------|------|-------------| +| POST | `/echa/search` | Single ECHA substance search by CAS | +| POST | `/echa/batch-search` | Batch ECHA search for multiple CAS numbers | +| POST | `/cosing/search` | COSING search (by name, CAS, EC, or ID) | +| POST | `/cosing/batch-search` | Batch COSING search | +| POST | `/common/pubchem` | PubChem property lookup by CAS | +| POST | `/common/generate-pdf` | Generate PDF from URL via Playwright | +| GET | `/common/download-pdf/{name}` | Download a generated PDF | +| POST | `/common/cir-search` | CIR ingredient text search | +| GET | `/health`, `/ping` | Health check endpoints | + +Docs available at `/docs` (Swagger) and `/redoc`. 
+ +## Environment Variables + +Configured via `.env` file (loaded with `python-dotenv`): + +- `ADMIN_USER` - MongoDB admin username +- `ADMIN_PASSWORD` - MongoDB admin password +- `MONGO_HOST` - MongoDB host +- `MONGO_PORT` - MongoDB port +- `DATABASE_URL` - PostgreSQL connection string + +## Development + +### Setup +```bash +uv sync # Install dependencies +playwright install # Install browser binaries for PDF generation +``` + +### Running the API +```bash +uv run python -m pif_compiler.main +# or +uv run uvicorn pif_compiler.main:app --reload --host 0.0.0.0 --port 8000 +``` + +### Key conventions +- Services in `services/` handle external API calls and data extraction +- Models in `classes/models.py` use Pydantic `@model_validator` and `@classmethod` builders for construction from raw API data +- The `orchestrator` pattern (see `srv_echa.py`) handles: validate input -> check local cache -> fetch from external -> store locally -> return +- All modules use the shared logger from `common_log.get_logger()` +- API routes define Pydantic request/response models inline in each route file + +### Important domain concepts +- **CAS number**: Chemical Abstracts Service identifier (e.g., "50-00-0") +- **INCI**: International Nomenclature of Cosmetic Ingredients +- **NOAEL**: No Observed Adverse Effect Level (preferred toxicity indicator) +- **LOAEL**: Lowest Observed Adverse Effect Level +- **LD50**: Lethal Dose 50% +- **DAP**: Dermal Absorption Percentage +- **SED**: Systemic Exposure Dosage +- **MoS**: Margin of Safety +- **PIF**: Product Information File (EU cosmetic regulation requirement) diff --git a/README.md b/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/data/jsonschema.json b/data/jsonschema.json deleted file mode 100644 index e067b7b..0000000 --- a/data/jsonschema.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "INCI": { - "type": "array", - "items": [ 
- { - "type": "string" - }, - { - "type": "string" - } - ] - }, - "CAS": { - "type": "array", - "items": [ - { - "type": "array", - "items": [ - { - "type": "string" - }, - { - "type": "string" - } - ] - }, - { - "type": "array", - "items": [ - { - "type": "string" - } - ] - } - ] - }, - "percentage": { - "type": "array", - "items": [ - { - "type": "number" - }, - { - "type": "number" - } - ] - } - }, - "required": [ - "INCI", - "CAS", - "percentage" - ] - } \ No newline at end of file diff --git a/data/log_readme.md b/data/log_readme.md deleted file mode 100644 index 3255554..0000000 --- a/data/log_readme.md +++ /dev/null @@ -1,28 +0,0 @@ -# Echa Scraping Log Readme - -Il file di log viene utilizzato durante lo scraping per tenere traccia delle sostanze estratte. - -**Colonne:** -- **casNo**: il numero CAS della sostanza. -- **substanceId**: l'identificativo della sostanza nel database COSING. -- **inciName**: il nome INCI della sostanza. -- **scraping_AcuteToxicity**: stato dello scraping della pagina *Acute Toxicity* (valori LD50, LC50, ecc.). -- **scraping_RepeatedDose**: stato dello scraping della pagina *Repeated Dose* (valori NOAEL, DNEL, ecc.). -- **timestamp**: il momento in cui il dato è stato registrato. - -**Valori possibili per scraping_AcuteToxicity e scraping_RepeatedDose:** -1. **no_lead_dossiers**: non esistono lead dossiers attivi o inattivi per la sostanza. -2. **successful_scrape**: dati estratti con successo dalla pagina. -3. **no_data_found**: è stato trovato un lead dossier, ma la pagina non esiste o non contiene dati. -4. **error**: diversi tipi di errori. - ---- - -Ho dedicato 20-30 minuti alla conferma manuale dei risultati *no_data_found* e *no_lead_dossiers*: ho verificato casualmente che non esistessero dossier o che le pagine fossero effettivamente prive di dati. - -Durante il primo full-scraping era presente un bug, che ho successivamente corretto, consentendo l'estrazione di altre 700 sostanze. 
Non so se siano presenti altri bug simili. - ---- - -Al momento ci sono **68 righe nel log con errori.** Sto investigando, ma nella maggior parte dei casi si tratta di errori causati dalla mancanza di dati nelle pagine. -In pratica, molti di questi sono semplicemente *no_data_found* erroneamente segnati come *error*. diff --git a/docs/logiche.md b/data/logiche.md similarity index 100% rename from docs/logiche.md rename to data/logiche.md diff --git a/data/clean_response_full.csv b/data/old_data/clean_response_full.csv similarity index 100% rename from data/clean_response_full.csv rename to data/old_data/clean_response_full.csv diff --git a/data/clean_responses_shrunk.csv b/data/old_data/clean_responses_shrunk.csv similarity index 100% rename from data/clean_responses_shrunk.csv rename to data/old_data/clean_responses_shrunk.csv diff --git a/data/echa-cosing-scraping-log.csv b/data/old_data/echa-cosing-scraping-log.csv similarity index 100% rename from data/echa-cosing-scraping-log.csv rename to data/old_data/echa-cosing-scraping-log.csv diff --git a/data/echa-reach-scraping-log.csv b/data/old_data/echa-reach-scraping-log.csv similarity index 100% rename from data/echa-reach-scraping-log.csv rename to data/old_data/echa-reach-scraping-log.csv diff --git a/data/echa_full_scraping.csv b/data/old_data/echa_full_scraping.csv similarity index 100% rename from data/echa_full_scraping.csv rename to data/old_data/echa_full_scraping.csv diff --git a/data/exploded_cas_cosing.csv b/data/old_data/exploded_cas_cosing.csv similarity index 100% rename from data/exploded_cas_cosing.csv rename to data/old_data/exploded_cas_cosing.csv diff --git a/data/output.pdf b/data/output.pdf deleted file mode 100644 index 0f6e3136db28a315da27558eea4b40247afe2edc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 199866 zcma&Nb8sd>1MVB!-Z&fkjcwbuZF^%UyRmKC+}O79#RYVR9DaRGc!H? 
zN7wV4r<+_+RGglPffbH?;rw(Dj)@pR>|ksS$IHto;%sK*>flUFC1UC9U}xoKM-9iQ z=50*Vq#yn_)Bm%CpC8W5-t<3xVEG??F)<=$f@2hxBW9F$aJDnD{U0K>{~;0)`j6=U zmCA|`1K=2CrTk`CiV(4@vzyuf{&7tI(*THD*}DGwR7P>zf8Qi( zX5wJ_?Zz3+v@()>~^5nwL` zE*x(#w!Yvghlc(ze0|(;_b*7yIa z%^dyNz}gT>HTvO7OB^EuYPC@t@M`3R`1N*wIWV$8)SDV=`uehf$kT{(NW(}Zu=6$e zG&3>s-A01xp$4sO`1;~7k(d`;bY~9s z<%`kaBzIxcG8fRbs3^eA`If z@BymBfH*qQLV@&69K*6v3IOci>=KSXDD{Dd)HJS>My-;LsWw|5cVsQQ*=^ zqJ@e&T8FN{8#h+%Vj?A9meql$^9N{C`j|+oZbg__$#CPOQE^C1YH|-s5BizGFSAl6 z3Mb;@@XY>NMpS>YTL6D|tsE~lYg(I2)ls8X#I#@qP!MOQC{b%Si|`20454`L>okKb zMcqHC!sqX16RSppEVD<;HkYYpKvZg1G=)hZmZeb0bDq-xJdU+WX^#@0R@Ne~Qtq5L zjyuCeVQjTlgb-t3(`*mQqz$zfkhtZNqawp3tLWd6P>G8h;L6}h1%*jAk38BT>3+#M z{@6#PJ51P+6>}uQDL~a{n^VJb92Iw6bIZ&t9lAD0+3olQR8N^xyZ~D=S7eRT_PSQ< zR!L0;T8`xqDAPQ6K7dr|d)kc{;qdbN`q`M_#=hCxTi$9U=MA0TMQ4dOx2I1l8wwnn zQDs{3-0wvGpInxVSA64i6x21^!s2M+tAAnM&4u!pS@(|Ac)rU*B9+qI@VcC1iuR4Z z=RJVSpN*lJu3t+{@4g>~9n*23NepM=^JZuv*3YK)G38Xor#NA>t!fJWy4(yQGcIv4 za-yoyc@0%@nE_Y%!Nwf7twt#js>i1^#cG!bm++y*q=0F6?%&5ZCK6bHcmoS)Ya~BVNSl;#aYLAB)=pdd)&8$0z497H^~&ni4sWoh zo7I}eMFZno17mpMXP%i{Z-f(b05bj)vkHERo35Yc-sCA@#YO*jZJ-a)zXHweGh9VrTOgn#%P22V47?| z6*T=}lXO+v*Nr<4*TVqujOklcNVO-`>Kxl(i%)*4y>60!HahRDtdh;KkQVnG5Jta6DhOG#wfr8_ubzzuoQh@ zX`8W3_w-I!H6oLtyN8=`3%TSGpIS*d;_8k+_1psw&ibqt$=;)oqmWs~h))p+89v}Y z9Vh!d>^?o3EXQ|hwS&O>y&nks7af8BR7peJv;QU8FxPTG#9zb+dNY{02C`DR6?W@0>O3Gyl^Adq-u zVW@_x&-zvZZi8X+vYx7!kNg(P#=%a@D3*~lRRB<8;qI7fCBLF^dVYPzr;3h^$HRfD zgZN1|vaR#r66S5KICFBG)Er%}L(^a)@sZU`8}4UJdi=)_vGR!_RQKZ0PQdqD=$e^N zJeTC4#O?@OyD}!L-&J39A>#4S+!W70*_;T=+M;~V4eA{?`B?4rR`>GDHq=j_kk9L+ zqvGQ0nfAIivAFWdA1r=o7w1>b-R7Uv_CHkH*{j>At_pKIeqaFxo(72Q{08{U^EOPn z#bbMth1B_Gz>o7k zXNZ>TJ>1UP`Ex7TW)|kqL;DiT?|e6AG(VCN?FjXW#_wr1{LN@%2;FdoSs)i5{}d3; z5Gr7Dm4=Rn*H?R-Tz-oM8rKnCFu=u?A~!1}qiC#sT$e6Ay1_-$?$10z1_WcBnGKw* zwM=7XySTL5y6HBF`%e^1V3;G{frSsYbI9@*b$&5}i+>j-2X%aFMXI-zzoI9gwSivHF6vOJ9BzE`1jlUi5W{9; z>B`>iI)~37brqM&+3|S=A3=c!OGd|%>%DDq1kCc`{whPVG~{{L74WsdrweQGGPt(q 
zk)ePG?KJ4AN0^fwgSqwWZ{$l1#Anv0$rH6j8$l2N(L8H33;=!Yrx)RG@)0Jp3I9@x(FSPb{;_qiF0S?O=%* zQgo`7p?&56pgOkb+L46S8Hg7uwh}zm2yLV~{{Db*%#tvLQ)LTgqqLJ6jUaiPPOUl! z4akXfUW2zxN6o7xSs#=-*DzX;^Ld)#sZsy<9;D3=S>GFy^t!3jy&QiY#pijAANo$0$&ryQ+HlJ)*4&!3mj9kmA10GAD6g z3I3>FI(P>O>_u8b>qjs{AP?JQJ!ZcSd|i7NB>bz!MPk-nZ zJyEh4{R*p>qp9BK){t4Jr&ld9fV_sHYW{O799kLK%5Q+fyJkve@*7FmITe}w=~eJ_ zT9fqyx2w|;5(cUulkf^xKzxe;gKUm<+#X94heVr969-3je@}6c$G8d060^U=33(@4 zddCbE&;7@3ECe+)M^Qut7mhAdh%SyC9<+<$P|&wwFm)_TE0_`0FjGC?Pk!h@P_lNY z4f6;w#j85Cd^pZDR_rhC1;Ivnoj!FZLD5p&?TA}XDt7zG3=;`EoP!W?_JXdl?>o3w zUpmM5Mm3L-z+iO7@|UQzc+#I%1yCxeuX$Sht94+bsHA_LaW?(7VivIqYC?k=nv3v# zo;|mJHN!+!EEUa3YNERW6OZon+j#& z_7+g}N4L(T3yqvhi-8M3(`gwNr>SaH=Ak7N;q08?WSCepF&%D+)t0u*>Ld<)xQc35 zkE(Z4f?K}&?yTd^)D45<-iYT|b~F;q$(5DpqztI(Q_wE*ZhQ7EP$)CI4)O$+4#rpK z!x4_VcOu36uJK#q8guq1Ps(RV>YQ{I(7nHO`N6>Z*fBU~aP74HC8>$g49kZjG&}rj z8!&LQlsGfaLeFH4`_hnk0kq4Hs3XAA*&@^2 zCWaYmDokw^02_6Zoc)1ZX^7_BRt#ocmvx)vNi2ykKPtFanwWO10s}iE(FXLKlr2Z+ z=!(o`hL&#xym&UZjC_2k_(){%M>3fDmhysa3d~v9@1kX>ma{0|?2WlpQJXW99HPhY zU_$j%9JvqI3?7=up3S3IT^!uehUx`K$<1oFW2*Y1w)^dR`yJb<4I_KWf_8%CG&73e zk(C7B_Cx?Ct3SC>lr2^z670yiSWn_j7y)@#z69*`y$9@NfSzoF}!!E;(zbdc8?th_|)anUGt~dQd;r!3cEQ8 z#eQ5wU+(Q_;Y>O6`7)JRn6&aM*-%a$`*^Zq?!JT?y`xl=8c>NMeYRMvuDT1mzDNd+ zusk*@kneo{uwV~fxaxo6I_}zXUru?gYwp}xxcotsroi7xWOv$`_%!i48ZEwxE>{+< zfA_-g+%@vLeRUx=aUlvZ^LWSbW@zE6u;^B?QafA!&STXWZ?4Tb{|%yk^5p&>mjADi z{9mb_>3_od{{ylABl-UuTxb0s`2T;zbyg0}{{`1|rsBw2|MaY=9IpL3d7%UcAjGgX z%xn|%Ap`U$0Nx0a{FfVyKhwsH)h*mTn#DWe%Uvzi{6(OGZ)L>Av5aTFu`~@|OJw7h ztX@BWcL(HOp{hnkP@4X4wG##btsm!i2chr@M_rdI99;YNsWK#XcIn?=Be|)^y9xrj z{@)h*{x8qK4TaR5?=h?(;KB~$YI?tzsgR}H>jUr(+B!g&|MNabS8xi#Ht=6-iU7b( z7mgj>4AE;y2rYFVlLmp=8dk?juHv+%;zhjpCmYeGWoZp0 zueigY*vQSYSaR@&^ag?>uCK7p`s$zdJDaSD5fSwycA8OECQ>4_T1W)d$=1CtM2odQ z^>&_gUR!8Sd(ALLm2Wklxd8uRsh!#9Tn5MxCu(B1cRclHgpbp3dwVw_A9H(2I}3Y7 zvKZ@|NfFByTu=cjz5}>Uhry93eRZni!Ji6CY zpx?an=*k}5iKE?bpu(VbXPZwc_y>>?V%wO}Gk^CPiQ03jy|89`r9=2hzqWSC)4jyB 
zcZe$YO&_F?x><6Uj2d|<-+YRFq>op1T{TJ32FRX-!m>8C-?i9pk{ht8XBb@1YbnzY z$Xz1k$Dv`(I^?AMvBJ37&OV`R9>2ztCz{ z=BkrgB`$dT5m+0wRdeHTVT?Qv*jv^wf1Y8NY)ygAUt1nh9oPEKrEypMfmxA9WFA>Z z8Q6^Z2GkxARqZBO5Kn)0Q==UnxarT})8VX)h`Aio_ufU9lnca>m;NYLX2eaj zA4%w{*7JL$!^nF5@Us9tWCP>K;|3Ntjyj$9y?# z9#|(DSoQJlxEv5dy$(qC8!(mnv^YK{VB?g1YW~$g>;+yP)hFZU{G8U`*)ppPXLpMa zyW-ll4SyYe>uBG3HGiAcMGo%QtIbq!ziXiNkL4vS-&NB0oXI}r(9nOuEnl}0P`%eg zd?Gnv*y%2OKH$6}Ea-is`)UCRaGu~n8PMw3Ru?gx?)$n$i+!ksIc3$iy+(Y@M-dJ4x#JQG&^qO*H`q%Tg!} zhVjn}VkRSaFihiLJC`XoD;K%g>hnBZxAH}Tq)Mg>r+SBa3Fj7it>;; z*bl$URE!)O@lJU#jTX8~z9qcG!o}_Vcn4H1CdS{>o`rF~N4Uv9~usJ<{+H)Im|M!|FZ`;p#r;XDWlcI0#|9 znM4sI{v^s_Xl&OFR|sP}k!R;@1gxDWZ;jQ};$>zTCiEXNifAXA=nZjm?NtSi8P%?5 z=)PC1*pR*WzK#F|7z5h>GpQECni?xDeDrEYvKF^izd|gXSb+!ut2nPAUV zU;lIeFm7ThP|w6}@cNyPs9mfSAiBl}ARrCC?ZjbTtTsKKM(oj^nG6u)EltWpu?r=p znM7+Fpv0olWspf*Ugbk1HTq2I@N7o&#VN}%X}m(@P`kt+P#_&>0QFj_6*Vq;37J?7 z91~cfc6V8TIgkqxWSzS=gK4Q)3`(=PwAkBInReXGkp2i-u8@1m0ZpU(*Bl5+YN^Q! z;(uH}eF%(UbSQ&!dnu9)+uRTwb+D3cwRlZTWhjtL204N!0jie(;wHJKff-5<%%dXB z1hL-+aaNFTt2reCpuXxk`Bac(5be3x+`5H^0pJ*H3ewi0ipeXhl+ok8p5DhN=!NX> z{Luyv5*7|Ixo9oDiAq2n4yPd-s9{zeM>~2&uNo{>rhrZ6q19Grl&3&DzAuwTD&)!r z6qa5#CR1(F=8;6OhR5XanB3jbvO}ZHpRdXnwIq!oDS~BgY5Ra&HTIhL)5aXCuRGt= z4zi3vVoa+HE>z2O)yo*6-c)QM8Ws5R2Wl12B2|oAXp=RBoXk@eB zJd+9&&`&|i26akI=8@FGas*z?V={MRb940<3I#zlkaUyH|0gCKT!JO883#OF3<{$_ z9RR?Zp9d-yQNJW82Vo;>b5k(yp;d&~3Bf2EloU*>tc&IhXfuXs2e4P;PYTh}zaX=x z&2zfYqJ+(JQeaV7^IZzjn*U9iWRgUG7Hu}>)Hy`Wv@*2GZV}WO0!7WH>q3q8&a2lb zE9P=y4u_)H$X*OGM6e$sG~TUWyM6H^m+gY(9SoB?Vp|nouLUM z;%5oF?1sXSC1hZNvdqUZQ->(%w|2q#)}vlA~0^=Na{ar z>1eeVxc;`hkL9UsJ5rU|75fZz2+R}qQnZvF_&&S7j(i}i0_5^3g=x|Z=INE`5VID* zJ!QPNY&*aa#wVg2%;Z>i6$r0cIUO22sbHnJ8lTQH=`Gc1+Dd zx`9{}^oZ8t%c2Jisg`Ed9S~6CbOaNQ;%p2$ueA9O@uFii4pcUAF<_SAbwKZC)=;|! z=2`Cc|G50v%I7~ucLbH||8e;jM}1tGQE5+~YGBjdqJY@nD3&G@dQ)ko#gi^urogZ$ zu#LZ2LaMCc1W?0lhx z8Tr45^)s|`4|P;`LCeB5=iE6sp5WQ!S(*fBTJ1z9^5Y91BtWZdlKlR;V-YL+y}Og? 
z5N#$0xwfx?jUMYGs0Q1BY^vZBmYbtW^NtoU(66SHWq`ND+5P^) z)H(9Dbo)r?Q6e+{tCGxe*RCgMWyd*mU15FQeD&jQH;|r}=CzJ7%|2O|??Pa8W&g_9 zWCM1t#+>^sdy9AbKJ-1);Cr%VcSg4VW8F(&y!d^Eudv0tXi7`J(BL8uqq{ZhdgWQ# z|AykAtw7qRJr3^iDtVGgCzHDApmlKMI`nDi0=?fh#s6Xa!@L`L$DYi#CG+612GQyL zCh*Z>Nw@B)K*k3{k5DXCVh*U}>h<5M8mg}4>V-YTA7$;l)(r6(6NN{2W$n6*a^DV7 z48xB~uCLYDdczjXg~0RnGsxqsL7T=E)!;UE=Bvr$Iy7c(Hek`|MKaB(<>`hS4^aw_ z`0@2lrdXKD{%+ zSG~%VT;Y4HWOVypGq6@1nsiwJGYchkXd z(d1TJmrwoU!3bo;F9r_I8Wh4*?&~rNLd|!n*t;A-M|E31_L@12+RhXmuK7*cJ2B60 z6yLPnxI+LxY8KX8oA=b(h`7Y(#=H49kEWjwuV*x+{f?!s-+4D*x6*F{zHW@xn)cGv zUq0W*i}RT{(KAyakwTk*Qh$B0>8l0q=6ebXb6z%P3UM2OvlB0}U0c8A@^p^+up<>B zGvhh=P8*;c>q|Db7f(ud*ZIu$SSSBvcGt%2+z;+KJh`IIWQpF>!2?U#>?dYzO^;5( zshULJmXcMem_k4IlxNGPYz!7|XG%!(343eL^BuXZna@tp5D3roa%=XGeS5E|ZKa${ z4_MYt*>h$GiAFrPR)?t;wT}NPm&Z@bjSjCdwOR z4^ji+DgAiQTeX#cMriLOtu+`wsnhoR8U4u6Fe*Q&|4modcSm#irInQupDQnry&oWn ziDmu^cpQ-P^_2?TDb1D7`rS0W;qQ~oBKR+1j*$VQ-k9n2>(mVv_;!3d)A`4CGpAGG z3mVrGL0+#npRgL{j;LBNV=2hF@&<{YZm&1B=)!19`YZ1@@jx43luwW`Pm}5*TD{|% z;%%69AOY-eIG4KwHV;kP!WbV{S(Ap{u!buu%bL290p}f6MFxmJ;{DmP+(ymfprQ2u zzM7ahXY~9L=LcY#yQ|p7D(#pv*ciBd{K5#>0tMj0^}ZzNnxFTG3T{cs92c+R2#PQR z<(fYWt$eNF`^UB)mw}4S{GZB%%Ukb~%{#0Y|qM~+-1{!T-qj?FR z)Ms9J{81OQ`wl#e$?23$kM_1vV<+@rlQiKYrfBXHHJhnfPMY2=Ch@+{>Fe3^KRTn` z9Yl>+%exavC}+%jN1=a)cDt`@eiJFU=5C_vW_7E#=#3AIAj%qomz{! zoR$fzS!va3TM-yr6gGO>@JCBTeWbTNNbP25Ln!zvIxJQs5cJD%ebv=+YlnY?=3|$6 z5a1{A)V#FnyoA!gRjOXzXw%jgMeGt*aG1+`?U4G{1xICtWANW{CTne{neq*$@I+r{ zSu&?-qoszlDA)yLk=4{@E+LnjC$NmfOpXtP{Lz^T&a4~_9H^HcCO4t*Ss})Hq*L(P ztGbbWBJ>}{8t_EfXCur&Ru;a|d+}e0QPIu1DVKicBwc~awRAUrmrBmj3TQYnomt$? 
zieXM+I87S2od7Hgs;nNA+^{*Z*cKuFMEAeSsV9Io=1nCyXEErSbLW7Zh-L5aG7wOi znpCg|WF52Q`>k&*QPLHL|M>Y>-}B}D9u_U~X3N1s%)@`ZJ2sK}|%-?Q+w-kQP?5@&z+VbXOQ)xOM zoOVf|UsUh9G5t!8XUV8q5xt*L^JeWRE5tEm&p53siNT1ZF*DjJ@(^9SKO=LKcr-1D z*y#NEBNZ}!xkGu`kFaa&$uxW#?~!Ws9UZ%SJL+8i&>=^EYF41ILEhdW?DX?7ll{rK zle^`_PG{Ols>49LJA^2sDziu6p*T7InyjY9DIiKO3Vf%wns6g#v$pzbMLqtyOZU*< z?(t;$2)?uBPPb>)eG*<3Bka;I-oCSQy_kRGj1JRxIktbv2)*6R5S*s7gLMKw5#z%& zXvKsJpT+Ci;JfD2ChX$*>+#^kVYb)Y@k1EhJb8`LtmNX@BW|-*G^hdH%w|z=oyCLS z`ntcTaW%K+oo~yd^!netBAeHNibpXo<_!0|Gy$ChL7w_G>hhTiR$6e_h1lXlXv!5=b zk7AZdbbAbyIw zTwyoM4GA()yOKay${K6)l<9KYXP$~wi3ofaU*xodAl6unAA^+X5X65kE)~=1zwhpM z5gX}V*4pnffKUmg0?4S;4&qKE-O3#MD{cy^hUx<^?2T}%LY7a14jJGnr*Te1M6`Ti zJ;+cr{{|t_>+O; z@L$1*C1+V`7%&n{agmXc2=rjsbU?Z+S{DT4eSIZ*oPMm0?5@&4b6Pu4o3me|@>~U| ziQai6v`MCMwANccJjzN&_40?pX*YR49?>i&uHp!wDfKJ%Sd530(ZvxzaUr1QR{|1b%D5@v#?@(nzqi)N97&X#b*(Vamf^V5)WJpnh$BBwNXgRE%vdu} zG2}1y^;6S9=R^p0sbV-&=qXWzMx07A9@@9hQE9d>sGhALBy%YY4K_z5jA3MwhH!QN z102?ruoPVUy^VeQWhw_wGF2PbnN{3)*TI?4OC#NDLV1C!?_e}_4NZ6w@+u-zx&&u8 z#~Rc=ojIK?bHrcrTG95QrlK5JA3gS<=8vD4G?dhXQ4^KADv{Q&RcJGl%^?4R45MJ+ zLgW~lGpKeN>yM-iEm~a4-%(i01u}+tlMTjf*-DXePM_7nZ2UCR?Kfc!%YU3SHA$K} z^rcGbL!+15|m_QtIIaQiJd>uqu5sgC{e3qY)?eQFdLiJqEMQ+tWa&E zseD42-M^RvSrfFV-GRwApgIy2Ny!e0+CDWGAGZ%C&E;ikNr^J<4o^FZQif~Q+8I+c zK^-JyOV$K7Ek7MxG#>H`+Sg441fGbHyfw{+EJtRf?_;FJh~(d2vvA=4Wf_e$zdaw9 zStKwkF=o1ISI>5AhbD~dEeqNRmRHYZx#L$deA{LL?cP zs~6Z7x!yPzk5U+*EhrX)9v4G&6Zr2ZIN5)jy`PG*1?vZ58w``^X8+il_b!UPCRYBe zCM4Qvb)gnVug?9NE*+09L2PLM?yE=B_ zWLIuUsYSwL8i|K;Fg8gHIhl>_Kk$0vc$aqGlNU2B0r4u^Lev>Nn{A80ibK6 zi|ljzFu+slE~bn`e`lnP7xG5-jp+75SLuNKX`Y{&D^%Fq_m=FvKyRh@SR&;{pHa=T z_0>@pKD{`TLhjAbIR6-u0X{MTsz{o23gi8I9&`NMXBt=XMQ zk;|XYNRQuM;T-}s7s!us23Y*92j2ece4aP{4_O|;{F9!CMWpUB9M+q=>8`iK)h&_6 z!=H_N-nFd^#XzHHk40OXuUmoIsQupmjnn7+zjgXtEdSpe=f608l^c9P*H3JC0GMXW zMI$<>7YJ7iY8BWj7w|IpXQxkxMQ1aQ4HwqovJvY&y;hZgdVR%OGUr<_802_m$J?n8 zaO-`7G4%t!T-XSTs`u;uF1|M@z`(!VQEZ;y;p>S8jF?hwetG=u-eD(sWVcp8ujeIh 
zLgBl;%irg9UxyX~co~n4KWT_c9fJzIe4j{N{L{2+P3KEs5x5ZwrXv&Uvgi8#)2tfUGG>J+@T(U&xek! z7uK3z!QUp=GzFp&jBF{RL)M_^ALa6Do{CwV4#!-b_NSKn3KKzDW-J8z6_nb1IaZFB z5)o|${mY@fQXqKE&7oa&$q2)VDN~JJUnQ(DZk0~9KVLtwyV_|HZ#5pRy!qU?e8FnV zo5N#A+Bk$*6+N337mY0)WMZ`4EMG;ZE&&WXm#eC#&7CBdfn1VYMx6kfrfS|6_e3=g zXXiLHeqR|+U(YDCtJN}aeDu!yM+erMU{@zbD@=F(FPC32mH+;Ny|-;=>s+saesj*V zjIDY@|M6LehQVyIa@_ECj>9I#@=~JXhl}nFIP`8hQWE^8LVb&PS%GKF{tsuE?_fX| zB0HaCzfNN30^JIFH9EsxpSHxX|IvkJxgUv$kWuv~1<{M-TD07X&WU?3p=m;}?9D}5 zFWzTPHeSzbtkcu9Z@q#iV;&)DPV#C_!t+#4%QMt;<+=LO<*Dt^RA)bBsXd3&dycD{ zppgLLa6xdavjy%T?E@s%m*fvIQ`_)DmhZU|UN}_fJv#wM{LuQsg-?IiT*Jy(1cC!< z8i{^80V6r6Og&qT$f&!Y>ckA&P5r)n@(vq{F)*D zMVtBDz&IdTT`WYeL5PnzXq|{V$2PubesJLf0hznPs)XyaaTPylL1imT0TC4GK_lKn z!@_-T4W8?TDAFYztH2?xMZOnS1`$79+n|=Ez59fPzGcUO7ai9mRjkB9nk#?GgY{*H zM!azUTu$#b-7j9TU8uv#E>{dLB;lJxQ2o|Z$K0{TxObEDwj<|dCnE80A~#)l9`GZh zK7_}{IG?$_hpGc}N;G1g^ETwyajlVTw@qkdc(5Z2g4Q+qTn_?D6h}fCBo?^0vV)h$ ztQ%)&gPF%?G5J^bwOby_Rf|+!Xr}|>GZOjwg@GKp-$%H6Pic>^kheQ5;RI&D!bNw( zHp^xFHrKRR?XmNtS-4PUaz0T53*&8xLb31UlKi)FyiyWlR~2O>1o z96_2qrBm>jP^e+Fc_#Qgb^r>)_1j@soR?l)6L;ogAnFxAfN4NDr$!+JoOF0}4e8b5 zyzG$diZt7!|jc0R!;uIZBlL12Z)r zbP0nK%Nrb?EwfGPl_&c_OZygwoXN~we*#T&YtxR_O%alK^Mta@7_TFH2}kwEU)#d# zJQ~{$CRyWFD1;1pUJALEfVL`$=n|;5s&eU)?uevu)HY&Klx7=cy37~DC?D4Nw}&Yc>p_uXP0X@%{1;oXI|019%Qf{e`t7sA zZa~}0M-BUS#}4Nmt-R#iS~7#n2raS7*hWj|<^gpYgUg{I&Pu*coZF6p9XsirE=>>-BH!%5;Q>r!W4%Oh%8F5k|?>8)h1!7G4u&8}e#q@8^ z$l&T6bH>R@{ZwN2ysF_6K@o;z1Q_#&9=|}C=G4@Oi)2uQ;{2*eLVSXtaPh%a$APh_ zYf8y)wB&yt7wG7PAmK$W^mn)C4I$J-M7l%;eoHzE{f$ByK>P^-LVk)?0iJr>O<)LH~7yx@C9mi zein|&59fOr+jZfsXJHGv)mskbikLxO>9PXya`0*`6niE;08xv?Ke)jNDBK>4eirG- zi>)l`Q+|j{Ep?z|;A`Pyml+0sXviT?Ej`Bfurz;= z+$tH+J=Crxx*H^dl4|cM!it{-ux3vFgWOf@oTnx@jq@jG00lJ>V4PFYA}!VKRhrcR z%^{^Lsha7AEB|-B|5wQth!%x$(Ysc12|fA@0NYcNhRrF0w}ea@0&QCfKuSQBD13FA zLr$6@$BNJ6q21kExttz-CMVoUB`)XW9Kvex&@UYlYbM<75chD&i`*1>7b7BOhrUJ2 z{zPhbYh&!QI^%(yn!6H^7qx{c{?A1t4lx8 zJL)b~cdsyzM!rf@A67-dgHh&oauXc|%UlXC=^493{SU$2Zb5V4^XU2^K&mdxO-Eezx6d$Jh~l){Syx90ucIW 
zy=0B-{sODdk6n$4;EoH_LyI}hTvD>B-mQFD(^Wxh1~D-buL90Cv>jUMbmXzZS_mBd zQdmi##Vv$VJ*^VviQN1lzc7ko!B$LiU3UqxRE#x##)?|^g#|2KGDEpnGDjD_iAi|~ zdPmOt%FFba*QVrCR7avcOmoom`$sa|RDqj}iw%KLHfn->Gi0W%7}FHNiHJn?h6$g* z3=wFDmYHGGzQt@;!TenKgg3ti6g2fcw@BdNfKDyGTa`-J9e>^4&L(gCQDu8MYr*G9*I3A(}6*huUKWD;E00j*|eGTSeowp3vp=cPZ>0IXAB@@b-esmlJ0G1sB-c6z5>rcptmH~y8bl$S9Ha%bjAmsQO;=9z4e&Av{OO@v0)gY)@6nsW$fkGA z;QFn}43B>{KxGEfk5zAd&6O*Y!D0bu+mR)g+IZ^BuE$?k5}j!gK$3fr&6nDE?EIuV zoimZ!bKHO-mn*w4L_S?Ik=uRB+$-bqPkUPe-ww1ZEeA4xdMIp!>ar;R$+k;vymW5S zn{Js{bLZM%u-E{ySa;@S7FGWDbKA}=yVNT0f~SH*bgRxo`0{@mWc$t!WOMB|^8df_ zX9oP=8Gkka+yA=XN>3;Lf<4Z^)?n`#vVcTPFVHa1WrEkj>DMRdS~>z&He`a|dO+`Q zU+T{2hM@$#RQ}{}d7>728H}L`$|&UwkLhi(LM*t5yN|fSh@Qrsi2xP}hLZ zgNNMj<k&`2V?}O_t^4r=Sg@AV+x8CpT)BERz9e=0n zu&Z|gh1F*$9uw}u>dT&Qr?>RoozClTKVe1CKIkH(j_dEGPuud+!k-7(Zlqf;w_YN1 znt%V;OLQRnwYt#yovRCFaez@I#zqnHW^F{M;FXC$aLfysb!hDN;bNznmIk?>FxfXN zpR^;2uuOyIyj#AH^!f=<146RF4n}e#&#?!4{jb+9d0bO3;usor0@7El=Q<*lz~6(o z6E9i=A%-D)pYrh1Y>lRU7UF4X(gG|E`O0}oDtDg2r4mVc)X=efrhi9m zT%HBu8!hhuhIwoJVC&;^yHau*efEN9}-7hEuO#XDqs2)r32Rft4(+N${PUwkqAB zyH{qpn_6+e)@W9tb(_-P%wOhE?+JFztbX15& zi&bG7RqjS4ZSLu(#>Z-ZtNu2IPREhzFud1RImTv~TNS^C$?$z~*Mu1;^yYl=LV~Zg zDPMd$WJzz;U6V2*Zog8$oqd}HCbe=aOY~EpcbDSFh!Mz@_Hpe{|5Wfw*F%}<#}QT~ z33Xl-!$bk;a}CHsQPP5A7-XRNc|1=ASR=1H?8xhijO#iFl&61v9twzp+-x~eidOb1 z$1Hkbf>yblYE`-ditLWnVB{0y(kQP-!a%1O;K{fBt>5HCKGUeab-usJgSpLj`61zL_wg;6x z4&v0Z{`_^Wn-Trd-kM|E&C=o`tpirNFzf3qR2Ux_0VAI&%er3j^(6X>v&m)*t;C&e zu#E2?A_zb5qY|u2)>CR{^Iuzf^UzeX;G(FIamT3#gJBaGr+{6HWB?Gy#qnaV3;v?S zMlR-yhobl}Ma+TZNnKjYlHQ~rpfVLq$K&3m{kWpnB61>DjvPo|t~%Kk3R+T5GByd6HQL zEkeqhXQE$uptUYmfu~SuRD3cHj6z8uii-M0jFY_%V#aE86L}URIDwMmRs7ih+Y!PRNvLHhyN%{$u1A)t^e(FEhwoE(jYNbKuiQomyOw$025NL zc)TN|^mi7h+i*zbT_Z9&cyE#BjCRn4Gr5yliC%uEfeV2u&!b8?!_327LYrhMRJlu+ zrUcK>m)1+bQOw1%FGHnn+^qke8ieUTf|V4)947OZPd5~4;Qe$BjHh5?n(xn0*#g8RcWaGpY*98w_)OXz4jAOX*Lr)}y^<%KAj<71^bKq}c_21l{t% zihSj0^#6TnAfFwW2uv`WR+Zp?QlR*AZM7Y?XJF1@(=k=D#`mSb*9m~{2oDw>~9D>{xdnTu2IkZwJf 
zzGoNxd!53pyMK=~#h8LC>+P5|Enmu_nMtG;G2~0i#&_r1u;H>j?RZE2d)&p^c2re) zt_>&rm8e`?$dTzVy#$3B@=|h+VrE?w??`%NZ!705rJP}AvM!u)} zpiUjo?&J?w~hCGdN99SqPKn%e~BEMHUBYffCB$ZvE4=t zRF>l_M%#kkTgbQ7Ce$%dW-!kihuLVI<~yae&i_Z<4_VG_46yl-_bza0ON8I4c}FyRcJLfs6M(Yh`;)LdlZp>GD{FS;sc%Ig9DcD zX0S$W(R2J#hSX2)VdN!zs5{{C!=csve<(GE!ocLEgF>;%R3MR3c(sosWc-M5*|y6d zF&{8@iQ4?cg2&yfc2Z1ryyL--BeDc-L+;dDE8GWS?5givyhAs%i_j9M&JD)4v1^s>EgPNE zl?QQ^OHZl&?8c5)t$Opnb?U&n$Rt^#OygUZmV8djQ*+j(6sl>-R!Y0(s5WFoL*UaP zwi$a3&$8E_9LAUh8L$&At-Y}2=;}8JwfEjX%B&w%NHoz#E9vkZmg`lNE6lJ?%wy;T zf2{UP*J0;GLNc~CSwoCh&b=k!LyaW75~^DGO9|$gFARsJg&K;?gGX8&7xtXVHY$6- zBVhMA{roMTXJZc?gkPkR;mQ{FrZx~+il3y#@q*gW|NUTQI?kNMctSSJ9ObIr8K6@4 zGnXr}T#V@Kk?hza+L1g*XT`Ip6RNtJAG}dlpVRQwh&?U`huuRf&S^B4D9&Dch`f=+ zsZUyn0NEY@OJ;%G{Tvvr;fk`d$8#RIb87Yn4d-wW}b8eC(ezx{#tc|ApQ zrG1FR7Ev;u#lhAZn^awcJ#Ovh#UTOsa?_@?BfQcQxTNJCL;_5Ig`3_6Db=4~Dl}hp zb0mFpx8Xo!aY$($jDi-)R}KB~O!_3z@s+JAu^vSu?1Hz5jKM;xQH}mU)65rEcGOK@ z(jtF6!3Js3|A&!t>dpj8yKQXSM#Z*m^Nnp(Y}>YN+o{;LZKLAkt8>|B^hN)Oy~mhq zt!E0-2pEamP-w9|D7GNZW3`Q=6`8P9K${~4@R7tQ<7l}AD>e1(jrZ~>^8XRflxBa)!|a%$jDcO&h$0 zt%KA&dj6ejL4o^1%4r;ww?dVtzDFPcTIe=CB2Cx&qZJPZsDa;tOF*p%S4(r<=4%l7 zlXp5Qwz=R;2$F>)YU_)&GU9n$WcCe^nh~%EdAr{Dww9-(z!)`e5GIttYaD3RGsNre zqg#LN1#n~BE~;h0jHe@TW0Aoik~@936BOHI`I3KBQe$)$RB1ZmZa74**kr1s?gpvqnPxUd`Ked&@_b=n*IyEsVXL5R+yXdtKHsnECQ>)jZfoOw)7Q z#8Pk0nEqIb4x!2>@8v+-uqipy$?6zzy$7KJ`0^sYT9&WQzUu&aH%L)1It5Bzg_F{y z0T=zkmQ7}NhFA1nE^JY$7-`H)yvQXJC{fK%BE)8BC1+|kppSMjvu+fyyON$vgd`K} zg_}UJK_X1Gn(LMO-iYU%!}auibklUzyd=0}vbwEkrFU~++I%E>tryFOL=2kPm`g#- z!D@zasrw+^c;cZiqzw``xmz*sdy%fGfc@03U~94Zf#iLB0)gx>^>WL<95GWs%jDgs z@LPuB?^m{%zHl$L{dVE_^Xiy`r{vKUhXeRBD7Z5O9*esyTRqnOO^^oGRr%tPHJK-Q zbPf)*#YuQfJeWMN#ClO#D%)**S36og@^B_&ye)=ZEAokDdB~yiO}N70_rR!T1TUQx zQRvxFb=S&wIqo+T_dROWfm2z;(;cZRQ6E3dluc(gq8oKeI zc7119g-^h=Tf*mnL*EcGyxe+NGoF7UTDxLPwe^}V@th?uvD*c+`3>HXJ8_(V`dK@97PI?LE7?#(6GrMnN|FeQx0V0}FOrYqydDo_?c!^8ODdl7kG^az6P+t3dYd2A7Zc6= zw!406-?MSp(}w4=t17~|PzaA0DjAj749`X(sPgsOkiZV`#z}LhF8!3%4!hRy_DWB} 
z+%Ax=EvualTwOhgoqNTj8s#StWra^0zC@m4|n3Kbe5)(E)% zeRA!(T$WYPcm3oA+c`_%G2GwPf7giP*nLQ$xDps?QaBI_$;q6!nOFBf})(tIPhCYVevU;z!3nr@3{M*?3Dx-}Lrfztw{{w9{rWC?SQb z{X!xepGNv#Pz6HJTX7E^irhYJP=I8B5gRuG>+u~t_>mR)U1TvNaxwO}3cTu9iPq6( z&cq5S_bgq{AG%hMt>6U+`+Ie)jTy=~!PyQ*w6g3)w!c4so0h}(OE+5tg~MRdwu7%Y zF|62E#u&xT>waVhbY&FF3OC% zZKs}wEJ_Q$FZo)W8pyS0Ii3Tjg@^xSYfy&ge!9S+>d+aETZEe3^teb}@UPl+NV)Kh z?c^VKz+OqA__MFK*FkM@{Lw`~>NG8bys7FaQ=l5z8ZXV=5HrCw#*hXn<1PenpChh6 zWVTfRt5nB~a2=W3m1I$hi(8=QRQLCli8aPD0wp3jOzD01p!_l^$Ej5uQU1#B$H0!! z9H?uK@Mg>)J~BCZ6y}!Gwj099sKy`r%x&+<4+Qe^%Mqyw_^OTEQ%Af#4KIZpTBlBY z2xM^k#D#c;6v+-h^pio?C-U}p#Uhh!Oow_$N2Pj5z?tS|O9~`;6>HEchy33v%z86M z%P^XwT@ETiTv2AHIiJ!-Qo?JYz&+0% z5xX<*+VB=E-Wo5m0tK!F-4-(M_irK1KgubsnN>=nN~XzL+LOEV3|(2eAk*?|2V2u= z$yN-KB8FZ)g7m7qdUVTrs3E^SAPIr%k&i#ox~qjhY355m>^HjU4UkInmi7CI=40I2 z{e{TRz!Q46Yk0zz5j)$Ay;OTQ+0x%D9}E8wXr?M3C1J3hZzD=a0)=un*US~0_*FS4 zi|jdHIa{rMTkzgZ4`se+xH|Fq8=JE}2wdM>T~pp(J00LZ)y~=^auG~WZ>$wQ zRPKjin4zlPqcY1tFZ7^ZfaT+*#-o(+JuwJ#&2w}O?qHYsKn!RN;Sn_Xy-%mWkFTV4 zQ1SZ!#O8>+3 z&Csk*wlICe`mHfb3l};EJySToce(qybM`MtU)ox?rDWdd{(fWkNsg22rLF9TW%c1* zBK}Q;z8L4fu_q zkc*2<-`oNN0Ry)(Lqm+xmmkCNX4P|mT-unFo3Q7QWn;o5{6`Axtqe)oPWm_qU4z-|C+)Ppjjp|9)os!#>}I7L|)n85iuI zFYWoS!_k+;*vPaxO67GB7iv~{bHy=wE0_W7F%H8YjN)sqne)ym?+kz zU&D3i$fN=$3qErDw3Q13SC9M8?08yCBfgjCkH&pe)quq% z%_eR1N}7^J=Em|W`ju%jU0u{dUS%eur*4BMyvR4VoXc*jsrHANuNbBVYb`*>@ZAN( zv5Y~alR7P7sQ)RTc;qi-o1r6{<^UvA^0@*9$DX&Q=JSj6mErel5Tep+88JELGBT*G zwX^*z4eQnU{ta1W<3$`ZrTp#8aE?1mz6j2FEodD*VpWebRM(M8-_vC&J)9g zjAlnI7%MUp2hkq@iLY1QJahG5OgiHmlo$kPF0XeIl49|Dr9kMpKJpe2@yR#gO8F`H zcj@hGyal*;MkpFXATw8q!oU?WJxOwZet1FCl9$#L$$Y7Q#ao^Tkp(Ji2k<~IjC|X)GWg+P#VGP7U8M`)oPRlsy>{axS?xab%q)Qh*^INW3k9-Y z6Z`4*+Kq*9vo?4T6G)^mhT zFbT0xI`K1>f=Cjwu-fmabNi!n&Om8{bxgxd%&&yoDR5qx$^m)Qj)~MVqze$-#O?w~ znNJ{^FMa`BD9t!{gaXglMsb>GVwhFGA2JDXrh5+{ohw<$ zU?Yh_1qE$Jbf8;LCSDPJV=Z3C2|p3Bbc)6TF`|h72sJbbCn7l&PE$>H2^ex)A+bM+ zM0Vagj%JcXkIok=rGIvvZ5rXfCM{U~M<;L{>dl+rYrulXFV21TX_un*Q1#uwtSZg) 
zGJ?kKmW}zc_x4Bi&AHM&Hw^<$fiI6VYlls9%Ba)WS<|9Hj_R%oA{uCFH>is7L!wbb zg#nkCzKK?aLcfGed>=X;!R-*=9W(b_kypOOGrB2<68*dVlIcP$VhJtXSH_l?3_1NF z;Ktkx$=I(A#S`a{z|rP$h%R9ry(h5IYr;v3n>L%)+T8_*I*Qhc+GMRm`&|O?EYE$* zKOieL6oU93ur*9wtySZ$`VA>vky%h*GIhkuS|%LHiEbHb`b2fu$;$C`Tvg~{u{F<& z;WSY{sz}hdN0g`MmU-cEOKU-ZY%~kpWriG`na09N3+)BDq@XBe)PV{3*ABH@z`X9X zJ;1zT4Q5Fpyv*VT(`@;E&-92#)4z+0VinL7v6!d`+TnG4bgcYEeZ9#zi|c=4YN&0p z!#I0vxUfYkurapvzs&>A`0XQyg&#UDi#jR+*xY{3GI2`!UB23$RGNo8Wb$}5F#r#BJq%3jr zRT=&xIn`0BUK>8$3Hb~r!pzA9*Ax~Gog+kant?qfJyMlaOG26$w35K3%PIej^)?2L zlRv-lOLKW|#NpDeP6m{`tNA8C=#MJuViR%D)O*6C_ z)H(llPLz)_(~(4JT0`sbCI4pGApn^=WJYPc;$J+UKk-0d!7p&*3Gu*DHko+fQyjl` zu^rf1hCt_+xS^4)TcuO#PsB4~*#s;{pc9kDUXNGrp>wErU3e=WnJm@}u2{oYL0tnLyo&J|IykN$ud`+!=3tlnR=2c!mPRvYV}L zcDiHl7pHI;eS|ejPN6BfkU)&>_>+O!)v$*b#XRyVhwT#=QJe4fDuEdGPu_CtT4arZw zD7Y8Nm)W~a5ji$~VMmUbe{4mrSzEWisNBeSq#I*<=E6p(&f~AAYu^*;2E~a3H(l-= zV0>S3UHT|Cc_1$4QR;j_VfAD+zz};SsY@usLUz}h8L7eY3MQ(BV!2tnTumIfqaB^| zTXx&Cgbj(H*AcP9+zI2V0|gZem_soTcgVhY97_&8UcvX2HyAj-qsA#_LMJ|SrdC8g zOA1#itK?exxB5RjfyC1FrTtaWc)Z%4&FYH`lOG;NH$e7szMc}f{-9yj+y685#NULt zPO3UWj_4+o9;N0vo?~=w7ru=_!SH-}-9&7^;`Ma!C>_S9&SC$V{pKqYMkE8Q?3Z|n ztG<|?w2L{u_;jq^n*B$#kQ`Z6K6!eYZhxxg!aH;`^;fB@zrCFKv%)UPRz(n13y<{3 zU?0i+bT4y%&Nu(&+C8&|fB4AM(MpZd-}+$f#09eLo5Nu!@N^Ocq+1jf-R4U@r$Xh? 
zqS38+$C|nt>>7q;i|zJ5GLG%8El;l@{o1^d zT9yJ5mYNhzXcBGhj^NrA!jB>Fd~FY-@h)MZoz=^NG}`%5Pd7TyUmmH<{dMJa*E%0W zv?Rl8Yff!y#Y&K+K^9BQhkbq{o{6CB4WZKAWhy-Zt>tMu@x(}tg zCfLNf!v)2c9Volno6RQuSlbMe*_xqC&_@gbC5ZhH_~t`<*q9SX9@F07$B9c5DHk2; z(vEVmS0|RxzYfxNrnx_`^N@42UoeDNCue}}k8|wTXdL#;4P-PKse1^k>k02|+6}Kd zQN_`%YdTS&aX)Jhi)Z1sV>KqVb`Q02r%XEvBIS{C-#Ga^+)q6Bs)EgRM6G`gcqyT6 zH*ifPoIR|S1`bU)ZuzVIX0Zs>xpHO@fyjU4YiFY2SFLTIt3z8I0>3;%pPksjWo0AL zURG!3dVruE%_TegHy8yOa#)5KwM`QCzP4CNd6h=}w=^QYs(|SnLO7PKudnV)x#=PQ z9iEZyenmWNThflB$``1EJ{Yy7;9za-KVu@(*mc6q4dT{o*!yqstwGo$tDyR7+qIB6 zX6YdNKEZ5g+FZ1OBPew=YQF|mKM5NhLgM2Xzv-PLo@A_>SAg4s%2~y<+t@uARKw&| zxC9|ZI+VE8Jg+*fkgr$kTVVB$3H)W0vXS_(^}pJ4*x0%LV|A^&S$Qmu;IsR0d=bD& zzY9BC-NysqBF*Ay*8?uj84jQTXt1I zOigvz?f9f`P!+T9+vDx>AhX~qva+_-Rz%Q+jUhe$j=6&;7f49T3 zx{{%fwyM=Z4A3(8{?g}1-P`l}LYQ{zXB8>%-NDaUqch(sw(IP2 z|8RO7*7?!k{&qX8X1WQdpZoVA?;xzQN6%)Wf>57f<~MB}#$V0qdsyj2^5pjE_q(6Qwvl!?)?YX=vvS<9DCKI@spcSmlr zqyghI*LS78dhOeIHDL3aw)L>n*p*|EQ05BEXwdn~Q5#q9_Ibq6|c zowtpTC;(9Hs_^K2>#B7!ifGCa>_*w4GghU%8IG5%r~adQt)9?35iMvhVlugKh$5p{ z7gl;+`q`DflYSlcTw=0m4ks6uMXvW@Cgs04LtCkn$UNw)>H2|&wlVa=GMbqX1WhLH zI6ay@p~nq6RW=b6K8F~lR_O(^aGx*ZeO;Om_uFueDZh#d&N z02zL2th`K&|H*4C7U)fk!b?qXlODse53WI5NxHl%LeK0$Pn?4(ZJb#A?XP93i`_(p zwMoBqA6lM9LH8F|V>+wsq=&zh3whXdGYT)rd$1}O@fPvs=St{=M$R^*EIOD#D~Rk` z>=!MPtieXnjXa$3ydJDp^Xc=$7P>k(FLkjX5Cpm(UE}!~U#ujyXlY5<-IGSdY2JMj z6G{eac|*ikGcQ9sB$d}AO87Q0|zAC|IGlnFr<|8jR(WT^(!;^*|mFn0zz|AQ*c%aQHP-pH#` zgv{b8UZ7BFR3AmJMyfwUc+y(-L@;P5bm1vr?ztKGbioc=D=C@q;Zz4Nn5tg*9XBKK z)S}s0;i1Ot(~F-NwN!0{#Oh;A^6~^SC8VWNh`)~z zX%3HiQ@|AA2T;LOMM&_Q1%t3TfpQ}^N31lfF&m+E#1*PwU9Q^xfYIpCQgG*X?O3eh z{?3)}BAcPVFN(3FI2^Mq376*z!St0JV&Wu!IZ+7!v$l zk?~F>h&d>G+0JnkJS2g7_xB85p0+(Z83vlQs$!h0jlxc0VhiMRJ(?ECh9Fj$I`!%E z&-T}vm6GgT%mRdXI(?@}S6DrX6Xl6iYW#d@UfN_A7(3W98PtW5aXJ%z;$!ViqUbp! 
z$=UTe2&ybbrQ&Lr2rO4kkFgCLK(0H?nKdic-WBDRp=ecCD2@fv7#2wpsqz zUr=USZWo>4(;d`z;#6!B)vu@!vZYbqVG#qi>VL`0MjI-R=SP?j@qo=p zs>fj{U@{Sz+jVf0YVPGgh|fTK-x*u5|mpZWV@$a=cJ(Nr}F2SMnyxPG4 zQ0T&mbm+2*G!j=f5}Oh*a{82;1+*b!%W2T!c3{ysX3#k!%qsTxbimwM0qI5DOJE{H z%lo`?>p6-j8E&68VQa_}9L~IQS;D7<%k%;ooU`zAq*Ezk{0q?VKzzjfn~jZCL4>h5 z5;0j%C;BxNap%+|o4m@2Qn13YB>4ym2CuX%gYX6Toc2@=N0)XAa&S4P;z&ZsbB2Ru zY#`HtiebmG@V|Rp`bkX=#xzsxHoDCm5#+nlDVD!-7We%pGU9Y8s2Jx827y=lM4%aZ z%vFmgMTg;>-S5dlP}_#i77dJEOHieAvWlhYMk)ADWSUuMCVVx!;szBPZqf&^ zN((n~_xxm7)3 zFURu2q^_u}JXEp7kK5H&B`NGaDKkdJB1vyoh_R=uQ!Mx7aJ|@p)%{}1AeZ~kGu!Qy zl#CK|7CaDfcrWM3u~=LL`6cHulfZ$hc9|`5epo#)PikCcxrQ1}$?9>!e}c%vCqTNf}xtZwsy&T->?BJ&d=8a@(9-f9cMT&yIp4c6TOFHv^rJ2765c7kOxRqMuDs{!def{%+76J zI$egGp@J0QlON!!CO+xghUmn-Ie><}6BhYdYo4HURo%6x@EEFaF499P60?TnLUF$8 ziX-U11XD)eZ_t!l@=KI3c0*m2r7yDkn96 zOE{ShrDE>UiPI}#hK1*Pn6w}wtPvywR4fpp(*^yo|6nV?A9or^<`*vUITERGZ;_X@ z(|l~XraEt#Zd{|93vnV6>M#QHr)L_QA60d>O-@aT|M3z}Z|@0>Pi z)Wd5u8Sd~>;1tai^VB*k6UvmqLpu)aOT zRJBD|B`H*Ks1y6Gzvxoup1A;sv?wrfwu)%UjOSzOJQ?h(#IbtStajjU1~ZX4QXb%r z%@~#dFzFr4)v?CPLZpJKouk)b)_v%Om-HvIjRDj)@DRLfNqi5mADU_N{u5N5gke0^ z27iBi5?NT$+lZ>mop_Aet9QD(02kRqrieHV!l_9ekLk#OKfkIKE@4*#0V34oWZ`PP zG#b$C9loj<-#sZjN}0NWY!hl~%xRGB851qesF1OKzr)GGjEh~75cG|GTUhsU(|+1y zg9)XY57DLYLQVVcY{>I1GB*`ZZ3%FM3~T=aZ*-$aC8Pr{?H?#nI}rD>H%QP3@$)r~ z7zI`->E@8CGKMcaIkp!oyb4p7j<mDHQTwY`MatW81UW+qS~I+D9U zo)O1G#pb99;%*qR+H8X8PqMH4>m{2q3S4HpzLnnWfku~?T`r4VvDHAcJv<#;alieTg&u@{xyMuj@X`6+-U#_pKaUo43cz*x%`+gsZ zEfanjbSf5pwdzdMvLXI#^xkLd2IrGK}U^6oCPa=5YCx{c@2 z=hZH^l;{DnPTYrA4`rUGf7ROYz#qErE;H8J^TZyyZ$5K|x3wPMEA_e@%6yv{>k{#Y5iI*#nS&feeRf*HQ=F<$IzEU-_>m?_k0hx zy36ZX&8OSQ-{*dD$GjU&pyhf?{TQW@qUN`Z*$NHUzxw4Q;A`jYXRICLE7!Z*_he5k zp@3K0F&AgAKkYsH+gNF3dNg_N(8Mec@6Y6AeV`g&WRr^49c|x9fvV6wNuNz_K5~Nd?>`!2|S^UP~!qt*+%G<3uJl z{ewZsXVR{y3@)Gin;q~|fcJL=Cbr}25Lr(mDT=+c=h@L_o7EhDkHQ zA2`_4gYMG-d>+y_UylS`FEKgx9h^o#=i{h5AO~`N zBx%dW#~>T+j_B|xtON-n7q3T#&%%^iomj_2S(^9$67t-H1C<{(xz1;RPBkcT$<)GS 
z?u^<5X)gwyjwQaAFsua4*F+zUHK&M*mO;7((sIIB-RS?pYAYW_T@S(fS>fu}O?G0% z)2%_jIkh9GL-)(dznT>V2B<%d|ND^j8z6qc=n8v&0=QF|Qz}`_9fzU)MCK0m_m^R! zsrk`Dki5#>f83Km+ve6vnsrn?YY$al?Z*=o2OR@b`mWxRFu*XwK%-h67COaNp+;VC zNdE=$f$aG^krU$K`m9RO&KTf#YG^ZiluDNc`(v;$n(pj0Q5cq*phE>JY!%QT2(cMU zOf&|)WUb#Z2^VcW8${7xgcX6$j{#&f2q8zOaXQx4F$9Km-DwhU?;o-B zVP+x~{VE6pGF~Ch$NY6l z+Xx>9FVsRp6r>%3i=?3~jj(@om7C4hLE_UoL?V_O`NwylzS-~?G_oO{0^1(4ec26HVN0+MK|So{`KlfQqqR4X^) zVO~i)VVAzA73CrJ%0l62smlt51p$Z*JlJ5$#4|8)~A( zlfC3|0MWW)BAx+&edoG^DcD)7*Rl0CV>W^>W#sVem!BgWzC$|;A|utFX{D-MK3y#M z|B81^Wi+2Gy-9D%p#>yCIz}_cva&QOh7|LIrIOIH#_YMICx{o$CCoc=kS>AtpdtwV zwOYu>yJhQx<0&K9wD#J8zkWqt5lPb@pFFWubPr7TKJ9+^phL$OZ;2z;E8vua;X|(< za|>ajppg}i=1h!RI$b@xSEe;4imIkqpGYdR0_jBWXG*ztry#mg(Er-f?~x3*1%{Pp z?r~G4gG>lt>JlQry`&PQ8o0x{io@T{H#_#YgBRk}Nck@I)Lj%PsPsl+o(s!#22*=( zaxxSsm-K(U5r8L!>JEp|Yv`sJSu~|*xQwI)Bv(leQ;;fdJ$&lE%w;|77UJh}d#11q zA~ZZlPy8NQhGgD-x?NpqKwNJyvn`M|j;pzTzB~?9`_ON8i~!p22s$On67nWE>_+X4yL>5_0>_OH9X*gmDFT2V{UpdLS;hDubheBrvIBhoF zi8(ps%9~k{+8eyM960L1P_}amaK^}S)9rqiUP)=qrh$1MQp)sf01Uh`iS=eyGZpSu zNO6f1gGISO)l+#HHk5e(z&|`QwaZI#2(iS0)0HNYa8v+Yyln0cjbgj4Ir_dK zv#&%IzAgE?$kvHEm(nl*ZpF#6VB#T6ew}uyKMWc+U)k6e{g3YR8Or6BXbo@>c25F{ zi;o>@#biN@GMJe+*e0A>X(f#w`yU*#jL&Fx@m?$|UuaQIFWr;pXd8AhP>Dd292I$d zv56YI=6oqdF(0a}To!SkSu+04fr6vNKoe*|nVl>WyI;`Y143)@jS|27(BgxD5H1Ox z@8=Q%CFwX^-N4;66mMZ_cb1hyH0B%#8X*n>FI=?eczI=%3!k(WK_ea-$1Nq*Q6{3+ zO*dtWB_tWg|CpJqKl4Xtw&Y#FC6eVp&$g6briz}8Cyq~5+{o^I#5h_olh(qEQn#mH zIY$=5rF`nKK@u?K_Im)sC4wg8HGkzlvy%MMujdXEQg}(Q1{*{7(K-U3lcH*e-IR$I zr(VRbGTH^Ja#<8%%7P{`*|;ePC_=j~2OspuwW2ZygYAODlLwba=DTYdg=-<~)@ zlU2HW>D1MZ4ju;4^Y}rv4UlgvLdQ4?680p7hCA9n&L!Nq%mAH@QG^Lv)(b~pcJ|2> zb!P9vkb3?1{D#-B#)}rtJi><&R6N&0Ros?Gn8c7`6?yWiX*|F{t{ zr>S>QpJKGHO`zgwyIR_*w6dHMO`k+WQc=ex(1TNK#e7MfEg* zG+Beft+`~bM%bhIU9MFp#+dF+s420};JqwiDEn%NwqITglw%3imoRGIDEHDDvwD42ew_g{A(7oUPImA)t3x4XrW-sMAj|)CtdC zt&kUXbIUNJo_i5)9zQ*I7HADKNH@S)KcZeE$t5LnPl-b2K=`>3=!Y6m9I4hq%tt`+ ze!NQhY=b*bCxupZY4DGd@?d3NAk 
zy`R|nLKp~HGuxlXpC6oKX0NkT;yJt6eYC&ih4?21yAiD5-RvclUqK-1fTd36GEr?J z>D(w`#E#pO$d^VbI?!oxl z*f>B5U^(DQ@Rwv+^*xx=@$j)d?jbL^h{~ zY(t2WPzBJB^MP=N>cFd-Yn7naf2Y*VM!;!=GUP6N2xso5OdXOW=9kR;tFqA=3)GK* z-70>#(Z_Pnh1AeN;Tj`1XxumE>2QgVZYD$ZaoqwGeSX=_N5q9*y&*haTN zdE$Z38LpP_%eaNrj|hb8m#|#G7PO>E+7Ma^6g27HU7THO>}OuaU+Ya*ime;k&g%%p z8LyS@J_299PeR`8D2)FCssE!>iiMMz?LQ$^x9W%!!KY3?;WT7?DB=-(Q26CK?i8E; z3UO`QQZqf|{OyNP_20=xMIv6#m}}z2lgOg2a;))Wqq8&Fsp<0Ww~3g#iNJBvPpePH z*GJrB9;Ql*M-DpRpL|+fO)SCZ=k{3tXYAYW?NRx-wA#MU<>$S^#zslU53sf2_mK(z z|1kC8ah3Yx@9OYIbyMr4tXY<|exLqCp{K<~=SOKP+lyRVt!Gw7HSL6@N=ioQ*i`e! zo3SYq|LU@R+_s*_i{>g~+_1ZVwqE4O7?)j^&AJE+*y3}=9eeKR3fK~G#UFca z_y|CJWAjLfx)4B4;HrT$;{3~*A z^UkU}CWI}^=&p@lumt5*&8 zA8nJPtCO?(?^CSnm$#TKSAt1^tY@)#(>gyLU#cuWv$~(ICy!m{YZtekZ_1D1bwq}$$Mvo;`|{0(utvMCm(}^hM!W5+>SY%u zOA?8X{UTi3MfsD|S3bV&L5&O&H@fA!w~?cjwehI2kc!BAwiwGa^%Y+%%KF@?=flFK z>DP4KWOTEyU9;wkiSsAKTl>cm`dL5S_(CDLGz@kY*}~~4D-!#sDkP64yQtR1*z@^? z#`8`(vqC^eorT%QjcInHuN2(ygS4MIAMeN8av92{m0|>8_Ebv~qDqAFgik$Di_<=l zKtRR@2XI}f<7_w_ou2dP(9(jiOw?9>p{&Qxu)qHr<_mul(XQRVgsX>ZjY_UljI09I zB2!Q1>rU3pO~th@nCD>fJl|7xXK7y;AE+OxR-(X!c4tbcCkox9_Fhi3V{# z3F%}px49=#oM0*B!(s7^GJt1YHyiuGf3KL>e^APg?jRcUaz;~fBzk2D&z*mELh&8g zky@GL47i^`QkD3bYUJYY@0EAktlFM+_Uv$AY5(<v4{>8NS{)zS_A(9kNA|X-R*Ay~UE0lLq6Td%cu`ShpI5D8O5BUTdVrJgo&g9ei|D zRNfh_jUcs#GVFYDcG}_P9je=IPe(boId)6yu9NHT65bD!?q-0#7b6)h&`&}-qh!-n z0E!e#8L#j9fjO~pWXb*^cCGmwkc`bjP$*UxF%AY66-WbSF&mM%)B&^P0&{X|IHw_5 zgvQM7JZgojEHd~E+*$l0kDH+5ak*ZWck8SiF{AND0wc2VVK!b}4)n+Py_TW>@EsfS zUkbNKP!zGafQWSFb5K?!L_r6^aFOBWQhj+KX2lyCNA_EGC)`KG_&8AU&~()7XDRH5 z-~nM~Ln60uZp^guoxnukhDN020DttxOAem*guy+Gn)ISd#T;{(5_K94r_j z!O4=SAXy`XUqhr>6VYWPSOxnj5c)!)yW%|fXJA-RV&JA4Oo3;%;oH-HY$SwM<~6eJ`MzAjQsSV==fab#;t^82k= z{+_-YKLEGTof1k@kYO+F-n5AkIT0cxni%HG$i<4hIGO|*btpESz(erpLX!f$EnX8I zjEyx!RHr8_W>@~)v(}%IGvT!eBc59I{dTHXF|j+u7F+h!odh7b)|iV4G}0EO=mEF&!ensQ?W? 
z&K1VgY%#A`Bu=JU*GemL{In7!W1)06W7jF zrthNXhE`b`@_y3|BwDnLl*f*44yl9d@7uwqe=b4tAqYejbIlY6?%$VRUbbsd0VzJE zWOK_3C85>yEbF7dwLVU?1w=lS19M&;ZJ(jg!{_WW0xy4M`^R8mxg5$MpCmRh6A`Au zA1MN+Ic3rW)UHQEedpV7lb+zc(IxW~(7e#b$ul0HD`Ia+9wazroTK7b9~gnGGrJg1 z!My`vg9+a4X1fYDx)P@$r>#wl%B(w})@UXd(^&UdkTt_Dfzbg58TW|WVZ z`L+#74Nc>&)uD$CKIRjzaUz)oA$1m{X1LMfY{i!w0=1)T_FMO)?Zu^qD)gT#zR>)k z8^NOyQ+1fc5F5qXL&E_ z+GPGyhfO1a>=5K@j7+F|I7SCluG4}PZ}l5Pf0-kqR|1B3YUT9N`W?{0N>2s0)E#XX z;L$=kj9B@RI~I&mBGGAE)6h!#7vM?zX*9}|JNTWwrS(ItvG=z9Uf%f?UPK7FyiEOzCT!PVNm?QRqE=A;TXW=b#Qdm}4G>G}Md|0<;Db?R9u?33KrwlgRXK5b10T+ps=g%8ZSe~s5*`AZ0FB7JtwtU@g<9!eP} zxpumQHNaJN;l2IFkVaV(+}flUu(B6`7qE7o(8jZdRUXl>tNNw(%*CW)r5b7%RT`G{ z&H+&%oKVB#7Z;K*Nt9K$-|gL5kYC6APceY1tNw~mmyA-ho*cKGimo)86>tk zABq5ktw5Mt(e={?Rf7i|;u9;W7o2|z;o!V_8ZNj3YOC-KNw?W~@g>lN zR40QF6dKGj4-pD;pqwHBmQThDPh|1RBs-Im3S;uI6TZ}l>`ct=&2R;*KH|f`$YbW=&d-eEsS5_Bz&FwgXwTcc!&J&0WTh+pHzGA8 zHQET}CgHjrUkzXeRYy~lD<_ap;kEK$%g2Oe;|&;1oZLJ*or*C`En!bE;eznlmbi7( z!hdMT#UT+)X|F}}mS<}SkP!T|@37W@(js6jlYwk(pLP3@r3Z!^U_XOrc>a|lwK-(z zuk#&qrfXb%&;`t1)w{>hf_!MjeZQ|HwueZr*D?4#wG{tlcF`6^qzZTl8&Zx4U@T&l zo51U5PhA!o_#KjR_{VyjFzPu=p0lxxiO>z%44 z>1mjI2ewdgft7IcluX7LHNY}M2`4Q8TVQ&hep>u7QBxB?Im3Ww3<7aqL6E%na)iAP z+9fjpHxiMy0HD5{!`eXRS0fyBb@VL+6+`GvO7x1qr6}V7a_7KBhM>DR+FiFm%~iZtK&k-G9e;qx}!OV-NY$#7B;K@@0)7r zRN(F_eeky8QJC!fhJ7dek)BOfnlu92Obu3)vhvDXi|_B3-kTFT3iiH<(vqxNMM7vEi1^+i{`Ro&IkoMVn> zQL6;mLojMXTq-2)O+41$25cALcpRLxI_URroNG5(;k+z?QqxsL3*7DcU|2fBuxctx~yJ>8> zBW+Blnwfe3>MrJN)y>GFu<_(raJ3w_tSxE3SZdNqw2hdpuAzAwKmWu(xzJDS#T#zZ zVv8EzfjubMFvd>q#rwCVRb9Vq%t|%xvS?x(>acRholdB?g#Ett{Yj_6 zWYeLQyWz8CHjQH|=D2XfnC95q*&oGqJc7-&+i#rFN`^Zw}a{$`oEM(O$S zWz@N4)%XyRGHqn*o8@!2#MjvRlbz9J?WK_0dcNV?pf|ohzy93$vU!-OB=ns#XZ>ih4EaFFXEn)-~?{nVQfd!ZD4YfGGCm?NSJ`q~Klj7mc&Lv%~ogPwT4KBmqo&19= zYB|B3mdX5ad;w2vQgF-F$(W}zWwY<(bo@hxX*MbQ=ZdY92C}u6^zP3@#Zx$|i(#7Lu zVgqI5wq_9HF+CdFK>7s#Y?*rL=dmjMC~MnzmSE&EQKC)c;`VIV?0muiK=ZR71-KY= zD0IXhnh1~(Y!cbh(K@pk1cIJ_;`i@|Ai2M4yW>Y52CL967U0!ft9OP{LeA>Umo{v# 
zjwSUh7OJM5aIFwV*4(oep2jG^@&;4ZAb(f)*UGmJkzbz`z2?YRH~}*fA(X|#Xh`ao zA7d;tQOf%w0W%~Q<;GjO`VKrk_oB`G2Lo?L3t>x*x+I74xYyugImIhTaJi}8sEnL< z<(00|QF8rJm4eBjnuh<~V>mVl5NmqBe~)rU@?uP#P}$gqMUgIIWt2bP9u)aM&jL<3uPS!;=-Dr!{gj>xuE#8=Nk@Rx7EgIZh>*nq zp_D`-qZ`WW;eq8*Qt{U~&?VdCKH@R+{nkJVgmxi_=%TmDLV5i#Y#=5q=JrDTONk>P z^x(W_igE-4QB})Mjv(QA`dDw>oB9I01Eil?Mj#*&{CTK=k4~G&cZ`3t3Z6bC4b~9cpq043nh#*2&0UPv=tVMGytF*m`~oc7{_(<9gR3g{^|@(L0o?-35A7D6=4 zf&G=I<0ejs|D@Oc6HvtTg>M4GypA6;rS1U0OI3u1`B9CUSJYu>Q&oZ{!Ab*6h)`O= ziR)fAD-2>|wwP(=3{SxO1^vX*=;I zSC~)y)sN)ka^=nG!-@j&s5cvrHIg?3Wu_Qw3d=@W-_0~kp^&9<%=>_HC#Afn*Y9H!bDeC zWR~4{@HPzJsh_zv^SU#C3`ihQ`MAY$pdu8F_F5852^yJrXt@y#(my_)Th9OxKV)th zrWvF^^J;HWmPRh@BBm&AGK@K1KEBmWEcT4dy!;F}YdJ z1Pa^+%R!IewYl`PYV51xks#loS=kDQgYJ>&Q(#gO2j1f2i)^j1F2gRO7I27?K)`sbE4EQ)czIXsYAU8i=`0YMrV zJEae77EF*CoCb~XxS2H2u&LZrA2?xi!5}qrhhW8%b9eL!LJwe(bu{D`aDh<8pm`g_ zX%o=~GXnmGoU;RO_{SCJhptjbLt-?z;Zt>=OL~u&_z1@f=kL}tc!^l^g-+7&P2Ez;;N^L?No8T@D3`Ybt0zv2QU0fxXScNmZHSZxwv@;z7b;>FM4FHkX}qbzvm zdX{Wg7cJLG8!L$P4o{1dcqUp~HOTs}e`%(#{1h^43@Lvog{9~N@q*vR2-m)VsNVg!9nry{ju@uCB12X= z(JFhZW_bLicU;+(<7iEn47Xmh`n0qFYr=-%`hKO%b!v{4`3Vf2SV6d3 z>|TOxU?fyDXm*R&S-q;GiR~pxsD`*6S<~A9AI`tU^deKF8Rv$( zzPYn-H9~zPp#6y?f-QAjoR~>z@KNjI2)`Pv!-!uNeL*+dAw?r^E*D%eWU8Zon0O9p zdcl*4;M_=`cUY%ZhImyWvLkpA3K7xEUB#KnWEtTlZ+R^*BqBOtaJacfn4?T9of@f- zPzEfdhpZk9CYMWg71oNdh=hUyX;@i#RhC(FCFxnOm`@eJ$#9XGV30P6%`Cd^yx2jW zlkHGsWzGIS?1IrzawdzK7|XS&XU0HyRjA*V6m)_1>{*}f&Ej6aFJh%0*ohaCZh^2i z0@=@WrB=IY@DEf82nSc*6L{Irufo_k1juxrlcBwngYvkRix-a`J~ z5;SC-TMi2b5n%_!^A=tsmDB?tPhz&djTWY-dMN323%@WqGf&Fz$7YfS_h(v3lB~k2 zU0^(8$jPTIs8C+*-mtUAD!Ype%!wIn<{0&VawRHMd@jnCmQvT}^BT_S1OGbUKic{j z@8_J<0hEwG0IB^b@yc=C&E|_+z6b-5ws>WoYE$S9W>cv|&vRWx}*^5L!5 zNk`~XM$8J~U_pqH1OvlQ429#9TO{A%XX3WY-MF)PCj@PzCzSN&`0#wes-udy{4YW9 zzm@W^G1C9fAh=a=$cof+Q+oR(h%J@!gHJt~#gTpy1nll-#fUH}XtINs(o)Uq;+m|I zE9KDn;YJ&RnV(`*uH37a?4afHm0L9$aT)4$zf;nr) z_i`qYW_+WirQ6&R%W+{DUQO+IQ+YBYV)BuPOlk6CZNSy4(u(y;rMKnJM|;bpi6sH4 
zO;)?E?m<}drW-w8cKb2eZ;N=qu^QsWUGH&hfRfvaB8XlTibASfR%!w&UWAzw$Mm zxzws-i^gKFvFNSDWm##1?zm*HKGlBmWKqNQJn{R{8gMMG&b;MSEsZ7jE*uX&gBH^` zwQ7z_wR)57J5vV7I1dBKq!Z99uf4us8C$Hsi1GWU%lq94EtB)}^T`;D*YA8e-bv;8 z`Q?mE>xawp<6`khXN%Re_ zF}=!SL{*FB%Cqr7Y|lif$;Y?_#@kjU#+%G>&3?9}s$+Hr=LvS{)>y?R^9<;8iE)A9 zWA;6M;x-5MAYjAB!RpcURX_6VI>vM1Sciw_;Jnz@=Kjp0SKg7s*CIdW(NslmB)nqH zbxw~T8G@;fFKk&B+~hRmjrE;A)VQmMrao&Lm1lzD%PTokHxj+K4Iz7>HR27zqdiA{ zD@te9^wFF*03xL99-w?~IIFbH`~vCuGq#!7omXbNx{)d9VXJrVh-x z$5YjXR;<*}c7j4jkk^Nxj~hitKt4O+ewc)pAE=Y+8%_wGVLDV>^l%Yp^u}0JS6`3Mu(a1 z1fHe%HfJGF-qJAHgf)fLO|s;Vl5+*N$f4Y*h}cGzK=TgWLOAUwv9tnimR5}nd7@gk)yv`eQkq#)5+=o9r`!G+qme>e4F#> zafaBZ!0j0TU7~A_gbE(sz+V6WS&o0$KH3IMDW(Qb-YIs+giCw9L~ApWKG4>Nxe1&H z=}V7?#;+EYY2wd(9mo^RO;Kb@q!ALge!R*^b`p+UC!R zz#ga1!vG@&rzvH~U4i;2tVxyHByi9c?*%Pdhc8PQCNqm7I^m6PigxdCjq>%b-Elyu zE~%OguxmOA8V=7WK1_J(NZOsHy{i4LGl{b~J19&G5Q$DYW^W)1=%+u@D~5P2da{&f zQXD6aW;Za&QrijkE->DRY=nk&FqyY`>J5pYXM}I!c^Fv~m_0{m_SwN}y1SbX4lk0I zG1D(pA9Ov)s?J+HsMQrdGP5bZUT?or#bVzlS=vOs%noTr_5=XrPW{=a{ce@?9P5oz z$TzE)&U?p_QuJC=3j)pft#3h^u_Hv8``$$~boc^yrP1@zVuqaJ@0r3}iZK>%U^F`t zrW$ZiEeK=<)FaP%#ek1khF=Aj=CGukMh<&5E0Gz2@Rv}9!hpp*%KMH(0RggF#a7(2Mlm-B#jClCEve^)Lp^fwCxYz+16s@v2PR#Gs6;Kg#@GC zLuiH7gqv^d_#hWYuJ=+>uR;&dx5$Rd8+{u^Mf}WPVHAHjf}=l_<`v803uP1tQ{>Ka zMvnPGRK|7;yTS4NI>qFdri)5MNjIh0l;siOyo!pxAYqkJ2QGn*t`ovz%)E22{a8mS zAcBk=TX&URcSh5O#1XWxz=9IjV%1@@){+U$)lD06@-ZOMVq{6RM@D?EZWr~=$v;!@&@8E6A`=U-uy>7@;8D@FN~lvw~G+@pEP zSVSMA`gE9vE+m=R&uxBOq#qYyTb2a0@|!wVZ!|yJ_2V=sXBYlWt0q0YdQ?&y~F4CTJTdB=_LY&fFrmJTseNY!c;%@-0RLkY3pSeOT zU^heMLkiBb6@^)UaN-~>u8*$+Tucf6B3Rzis+O<$2Xbi>NQ~Y}DB@nK7+WR^2N<@B zQG1Mtj?X5F6;-cd&#zZFr#a(*sYMPHCVaEo?!@tk^I%W%u--nqAu3MDCVA(=(VNz* z%7G-hF`_ABk%IfI{6(@AWpY5y{L#tAi>s*Wyzvjicz6b4+A-V`Q0I(D*W)1D;I!zP zqMSd`B~er%%kin=v!?gwedP0$9f;?(HIy*5epn)tuu{54_yK3&bJqNs?d#4V&qBPH z6jD~u+Mclis1k?I!`DJbT(0`d`C?IJdaP0Lvw7~4Ao~#xRbekaJ7s^lmV6A$I9CZ+6373G$>-S_9E^k@`^OOxHLQsa+SwzQrH6Gw;dpD%qzKaL(buG5nSaJ! 
zh^t)gTf?owp)cwnaMwqS$jIYjFp(BMu|@5!cWq#^rG6jNXsgll)~^ zS0K&8U&8h~0*&a)=Q9$J|p^SUec}LC_ah z14=4gO>7V(ZvT!^%aMcI_upbfs7FeCAq@T&)rFRkZg+ypSR(CQ6c;DJg{!>Btc~6gq>ht( zhc-)|bLR3Ltz~L0uNCnTcd>rZ;&B~LJxyBreoXYeEO&V*+-D@Tl*KX3F@8vDJDfOA zf&nR`cMRwv05RciX?QMJsN}ni;<6yW;E~~Bw){)3jNjiC-vpR{qV95MVeXVfu3Mgk zOfg&~&9+Y^9zkhqp^|(XkRe`{$^(JPDKg|KibK;+qYr&TK?!xI{kSP>fl=PBph|)i zlB1Hk%rSQ(B7*4NyXX4g6{q~D`PA~TG>8{&Fa)RZr33Ojw~T(ck3D6iVXMtdE~PAl zN+^Fz_#oU|0s1Eq$!MX?gvt(hQoh~V`ldveA`K3;%J9$?9c509rO%MZB}+PwRm|H< ze_3YlY=ORa!yzP9ddPMRL}~MPH2maxbHG~Jl_>oWr7*G(93nj!5_k$|!`f6Xd6QdS z1?dmi1!4(AW*cguySu+1Vuu(@ysm0v+2Jq=Gr9|PnccWGYygukhEm;5sb=IWe{m1(Hrz$9Jok47irLoykfX0$M>HO zktnm7kiidZo7OnTaQ(Iru%PaIobwlcTSJW4=#+B4$529I4c##eRGYJ^NC~pHazv00 zOOZ_N~hm zru4B4gL!465^6>!pNXbinvfW0_7_V%uen;Acb^^?p?@~?D|9Wv)|rz94V1LSL(X&I?=&y_8%pa9TJJzF~(-t zy{%^(HQNF_vneYw{%)bL8ds$|&-2H6e?D9h;2&A;`1SPoW>*ftkAq!*BdH#&@6R_y zr_Ja0(c`Pv``HR9lkxNCn^DJ?dBgRQ@o3KP0HWoHZta1ul+<>-!(+{B#@Cqd&&F>T zQHFxy0_(E-f^^1MkN@=qb7E|ne-G+f>#cQTrdMX=J3KDd=dDi9^YtbT%9rg$3HgGUw@j909kcQCZAGP>X@^Vf%J7W-c*kDdp=rWxPu6v-@LLE zkU(vdm%`@(zSlu)F<&Gi^xqD_7wv-w1LnQMP=@&tui=GRZp{(>PSpcf2Cl&CKgNH& zymA(drctU%9iQszzub_@ZU|2@(cm09%Jbnk`$8&oGXUOI9+5Hj^3>uz!FcvY_qiWG zy0R>I-=qb1_ClMx!SjO@Nsuq~WB!G-n6BF;Jv*Vv;fLJRJtul=CN@ z792&O`Dh{?iivkj7x&0{Z;Hrn-MNQ_rTb!>x04Q7Ams+lJBhgr2nxw)oxCTyGOQrS zpE+m5IWpw<(JMtxswq;Gxy<6!v>>e_-7l7xT&SZ=w{9-)MHl5nKEK*~lP9W*5HZ=O zox!GyPXwd%Dz$B2xW%k$UVO54bvm>iw9z;^#OnYq*)AuXEK5n9WPUlahC1))N=$wwgS6?^7cAb^zth6ldydHu%i4AsO$Yo^-4H{Z@GWkSw%;9z-vUr!%kQwFUdY50ORm zl;UGMGlt;^zO_gog)`zUc1ALJVc3e}ZFf0~t2+!VClv0yJ)eeZlZ^x&xS?f&Ri+Q; z;b4G_3jGau^|3=jn$qu%ry;^K!AL<|-MaZeD4arAZ}3}WG&iA_t8TuiQG{l1k39}fSI{GR|W{q`8Vb#tErB%juORB z$O;rd%|M%pF(n778Ho#39Qr#ix-;--aV z^pK_v@Sez$4LF_y>dn~CZJ@@uFxBZ3jr0%ezX z=ZOweXCljCcNnLEqv`5Q@J+ygv0QeF?NSKQp%(Q6XhM;|inaw{h(ITA4wObM(kH_9 ze5kSv=@+a|_O>ah6{-_wun@Zrlwm^x>bB?9wbGwo-V3?o_Ny4l_5mrtCIC;=1ydX< zbd!Vzi=@*E46d9QuKVdUF}yeCGqw#mnA%tR6Jr19{;nm&0&#-5T6zLV_rt4lD|qfo 
zR%z}4j+@j6oVfLz*W7G_r{5;TN=5eDo$(|KY{FDEk6o!FkTl*-*^E8$ICVCH6+(RmR zon*c|d$7E(=@a!&a?hAqGeRCFh&-#Qf@Gfjv@;c&fa@ zyq7=*1yoQEGSGUQ>jWv;A3rI9fwOYiNV2tdS-6tO;`jUstcB~}z?%D@pwHyD(a=G> z1E40-1+PSR1_ zr9~)%2IgXm`Xa*}ZvNZ=F-{MSfR!UrnuM|dk)~Rw$cbuWrH5ZPaw`Rh2e^SNA*mBx zh>!iq+PR;s8sxkcdY^|FnvjUR=6xFb7(ql-v|S;~99|^0g|JirU48Zf00l*g3`SDi zH^+$I!a4o^$@>X#4M|`_W@;d!A4t5o6*CHilF{kf%Z3AeB7;6Mlu(S6jx-|2lQu!# zc6z%mAZn&wa3MMn^0!CtlLclrjb)NB?`bq0!u%4VuTr#oCu@#Osnhsh$>zd)V3!GI zDm0NIgKXL>tBUunM(&~)gm-Og!7#PWs4;@5VZJ12_&|!q{AWZ16yu;m?0y;IhD=}q zY`;om&+MtnvaZx!cfCU7n_PXlE2Q zC9$708Uk3*klV(`KESu7b%z1bh6;BfS`Kl}-?j-8fu_gBa-9@_3h?mu9E$NrA~&*x z&oWaad_Ts0wPCrn9)%J|Y)_REo-Hry5JLx+N6a3GGynD(41oV^g9wx4c!0WDLuAsa zlQOA=7?T?#bUXmvA{-;!*E)VL{0ahW4`rq#d!=hb?r~4~w{VRiH=hP=;Jn*>P!~+kI>Nu0QfxWQU~&i@L2T1in{A+!YhjkK9}@Tv zJ7SDt{xa8jTm^%lNBV9z>E$`J-{AI54)p-T=S`ATn2zbfvfB1er;WH`4z8%_VWV`* zCYrZV#&Kp0H>5*@P@_8P=%hFv%yt=(V!irk7U#E|Cm3uW)05u7Wvq11Iffa#Nd9CJ zZ|^W%x9E&RX;}Y%t0L*o=xZnui!V{}xUjg5f|{y!%s+&v94GiIP9*^!NWnhSWM}m= z@MT`zqf14Bn-B}=cHu3GS{?=bLHza!%$-KgEcpf1;$9YmIGE|FW@1_x ze5E*us%eZ)#iFG7mCMDesWjN3_$BQb_W+$MpR{1OKY$B(J5R$5apBHPeRQ{$j5#WBNddaBf zxPYZP1C3dSvH5KB6fX>%GjW>(QjNG#P&8{sCJ>)_M zUG>?+ONoW2HHB@YrBP=W5Sq{chL#9aLadtzGk-^7@Drh{nI(;JWI(Y$!cT znOAHYoMuZ?4gvS?=Q#tjai554*;w>W;C5#QdAUaVh25HURL_?fx z&(HN=e#)9Fr9rmZqP#w$>_w>l8|UfVafO<_4AR+Kh1!ooPk;8O)CVNSKQ)xv*CeUI z8yeX<4yebTh_Dh7*Ap%-sp6fR(izG#tA&)m7s&F=6WI?#iMy?dh(!rN-x5&20hu{! 
z6pScaAlb6~)4{k!B2X=mGugj5Z`P}xW|5LC{C|PhKYmYYGy*vY%I#E;jS6^-8;mCV zj(lTxaYB>JP81+jzKs#i1mQXWD`{ICsUKlW!DmLiwAg+81FH?!bo6n z!ox^7Mxlp)=Y~$Gun5K8@zF<6zbqw{2pkNAz6Stn#h_<&cpk*rR-Blon1n3m)R``e zn~O@w-X@UhQ`4l9(&90hunCaaB-|#31bJ7Pli)X#s2{;kgig5{bQzFC+`%w2#s3;O zxT8h|Ellx8g?-b@9?!R^tF*E<3`yg+C~r+vm^U3gYJC3J7CtkrLXa3~;99vLa;@BW>BNu*qy& z+yA2^3iW#Ib-rlu{qcD_F!0V!O&`muoS5`9Y`FR9{=C3j`=RsI_Oj7&|G2ts#q-j_ zbo?rA*ZFy}{AE^uZzE&%yq~XTq!f&n|121>Ivzdg_eRsIkT!0yn0^j=Ps6aW9({Pr zJh)zd7A#X+uU*ceK8Vxx4D&`Ij}*^Cc^XivQoQtFnqH^o4ZZR_q}P-GQRE8xhk7u z<$}{$Sv|Jx)BAa-`;*@Nb)$IL(`4+7oZ9+h#FspE<&e&$_v+DR5wW3?+p6;5Q}1>~ z$6Afalh>nT?Vz?C?c69wuH$QaF!6lTifZlKZYF9-9s(X}V>{-8BjwZF5tfFB-qjdzsH_Zr+Qibo=noSE6P(<0KV zAtPM0WQ%+GX~g#v@&>p}RCER&m^|{Xj=Y#N)KXiFy~iSz0uxxnZTVfaGzxvR5W-M!33)oZ|xu5zd%}lR`;KzPq zCZ#d*^bGf@n{K0kLS~aKP4d>a@Di7wjssDT!E8f&j48QPGWJ_kDOEP zG->M@DxDV!eN;@efRH@@>mZ%UDeeelqwW=(dKckyO(*A!eXkLgP>`)Eicz#PLH>wZ zKdNI4dfc5df`M%x-?L1ct4rvw%zUG^;#>%#y@^2}BY@yJI8%It0+$n3Fj)w*M*ZfdbGz zwGvp_CcaG|CL`A)f&4h0`p*m3MTh6P_3E~v1&42-M=%xozQG;m(ciDUt)(Ud*c`Yg z&@uZa4+Jt^JYGSz$zxWjO0|ShAdC*@?(|F9ECNvr5WfU+HHhk60o<_!{mo!X`4Yhq z6*0)H)Di^aOfHapqXh=KN!{gk(T8SL&M7PMRx%$2xo>rqeP17li65)JyP|ZG&;o_> zpZvxrj(;a$Z{evrA=v}Xe4X+bgZ=y*oaElLX^B&dcm7Rrw$lO}S}`0?8ul8lMJ>j; zC5XPNQtlHXWzuk}u{Hinss^sTDyBk><-W6o85NZ&n3))A7%G0NdiLd_@9i0k;BRYc z1zftbP>99XiDwpJKg$3g@>b0cE= z_$9R;WT$#3JaC47I|wf~kOmQ{zIB|=6-$-oTCG%`!Z%kE*0u+gh#g0gC+5=yaE!!; zBe${>yOC|&6(_#paRm)xT^Dpfn1%#y_0zDm_wsO&9Iz+WDvNz^quwNtQb+S_?HaYH zS*>c`e zr-)4eQd!!z(qd>2g8|Aah)qu66`DFrzWW8k$XS_;#mXND{DvQoo zAjeWcC3pE@P(Ze{tkf>RW`-#^Gq2G1V9zXOf;(Sq{>RRiIFcTeNhSwEgyXJzvK2Ld z)GATG-4=s?yrDl61W0Mm1z{hmOI{A&kB=A$klpNFTKb*CI&W$3f7;({ybWvmm`8Wh zA{-~R)Cv#6DL}%kjRZ)uzE-j`#?Juq7T5X}LmpemQE_EuO!=ia{(@Z-ej*J*X#NDz zz~E_r7O1g?i!E#wwpyvVd_>F%eI7of3brp0P8-w(HZ&@)4`&lpU-F@_03m+=6IIU8 zD~F>L+VQRe_@fHbPiAN_xy&}kY-`(A=TWkE{6clpRTGY;A6c4qB1lpPQxU#b2Qz_p znh6;LEEpTFxJi;d&1NH3XYWfe8Je5B9n8SrFkK`pC~^Z@Oi4Ni{9A6tdY*!5(5Mf1 zwfP(hn}rST17226n|Z=VL{AGJ=kNjXXgB1j&a^N8ye&Ks>YkBB?BE3hg-=+-IMLK6 
zAL-QI{k?%oxK7Wh8Qqf_R^G*k%qsy8?@{j0ja_$B-i=RG!@yKcw>Q7Y$-~ly^}|o> z4N2v6Zk+dLKp6eK=c{SGj$awu9+)#*M{=k;sJj z7Z_y&We(Wky7v2$upJqeM5Xv7tPdep8j|fBxQiBJvHNi$-qtpwU5?S7oP6SbIac8Y~gJ_gVL*nI@h? z;ba8^tQlmn$})%Eda#L9M);P0(HKcKC!;lDb;>Bz)@sL+Lhzd3)+k|D6#`tM1`6Nx zad4l`k8vKjvF7qa3Wr#~!=^qPE=eKlsCDIaPdKJ8&=pWl16H~Co;9xM7HFU`FK2Q| zq$z`oJY;L)I%!ft%NxK+P1%txEE4w1#0uhLr)s=iyVXemTSuq^4-&zzC)l**v`Qq) z*Ct5Gn*pd;U1iP$1@S*Xti?$iXvTr8^2^`1!@Y7|3VXiL4!FVn(_8nC&QT@gaX}@U zl?&La8en3HJp^+_G9bO6WKcQmKq^v@>4Y%=CxF!?;^ru28_C zUoaL|(?a0}%APH)ai1hOQEFN^+5==U5o%yW^x*G_IKVwpx#{-So&iD<@kq`R8ZZEe z*W3#|(n(C3ApiwYzHD-$TyhF!MC@Uty{sQMM;*AsnuX7Q;Mku3z_EpKz~%+y=0%-p ze=n5o;^=b#D~EM(4H^7?=PBpEQ5!OU2ntaV#>%xMb9(s?nxK)(`lE{!GuRo*5E!t- zbXZI2o6@p3uPd%(BJBwhVtm{$8*5#GDUx~VMQ%({AEP*lDPA5s`8TV{dx>%S*LD=g zOQD(@6a{@ZFf>UGqWx<-ikZ<3O~`rpr}S@H=feEOpgQ`AJD@GY5g|_e46P)cU?W?l z0f~=}R0?_jkn#)6+p{8BYnp((<{L$BJ&cg3aDaTe881hR6J7ydki_DQz6!EAj0swpNAYzm432McGzn}rG*S6aVKPC? zE(wkON9SK>S-k1%G+)P{f%1#gmA3sc@x}r46j|vVEIs6CTfsdmo^z8WaRG#rJt!uI z^*IFQ^C{=k$Z>Fj*kU{pg|*ijOcaz0xy~yq0fVD{g1F9-|9a+oAhtjyGjnzCVWA(I z5Ldt7iD$R(@6`D?jedlNp`75T{4h#ml~g92Rb3`|(RX{%teJR_2;b^c|3k(i!?3|OySg-s;1e5 zioi<H3+SijjBrsUz5fZ+vU&5`3EJ@stlg2#V0~Bhq0fp)p!StXo}Eo zB_{^5!@qGz-dV2FPJ`qkfM(%00w@G$mW5oLIbw@MEr7=>#02Z%q}N0IAV`~dP<_8Y zk=D4`oRehPO-=iOGaEqm-fh@Id3AoNSjXFkMfe=%nALxBMJiO51z9>>Z|ODW>nJ&< z8UYz*zV`lET}hi2TWN%JBQOs@pVXa1{$s2%q5C+dU=r;>uZY%#Cx+ax$0q>xQye{ag~lOna*Z#oCdMWA;)&TKUkr8k zS0wyviQspR7$?AEK9zjvgk*wmN`IHrA}IK8?bHNH$}Tc)%tdX>-vsUbi3pE(30WS? 
zo8}`fH8HFI<2mBv2qFi((;!T8q|o9jGF!0Ge7uYpq#+dXyB8 zoMU^Oy?d%?i3TFX6Cfhs=_1TSYZCQ0b484a63^==T;(IyRZ|+s`Peke%YBmi@f%|( zATUHIA*YmM80<8bxJ2If>5DG!>C*9O|8#hR76fS}{a-TZf2$l~WM}`M8B{BBa|Fp} zrFPb_@2W=+AG^+#?Zc`u4lgc|8r$w~_76O`jwVrbMNx&h1XJT0H%95^!b(ML%@)#n z(Q~ina>F;V=+B>onx9vNZk4HCvlAohsHCSEzRDfn@}HV*N}?o}tDE~BUXAP`D<6`k z$a>tLx3BgbWZxdu++3Njce$sh;`h(Y+U6>~?+RtMKPp;UR2NnidOpg%=e0lOdf#dk zL|v@&?*g@c}UNWjR)y4f6wk@RXz4(GyF}F5CW?GU;fQM>nVEgpkOz0Hnh*KCRX+ zc$SMslmK+**^^O+y3YuUE1a-$et7iJkVA@#YliKzSO)e zOd##Lp=$Lp5J*)gjKN4>;C?j1kC9e~APQ{@Wjo!IVd`a#VFz;xC{u9icdp178tPM)}5mEsS$1QpyYILB}Qg_quH8gXF+1?@KA#6wt!% zb1JdEipcG^=+jzYr!@W7DZYI)tZBRUdQ9Tok$VOxG%GOiier^K^o-$AI(f*5tbeZ# z*J$Qx0r#HMAmSqP4Z@|Kxw$m;Tr-j_jg& zjbp*Ct|HNb^P_Tpe$t#_+KiwKv-*m*Lb*~?iYI?q(_KSugx#{t*alOF2lKT{U)o#f zj|@*b9DPf&=F(K}8>GzivEz1Kw%o?O6>nq6l-GL5$phd&Ra$0dV^U_m+!>8Q8(x`- zhqo*?tCI!htSN9Ttxywf9)dUKY+2G6F}&KA8xOxXONLt&7ZY-ypmMxv3!VtuZAOgv>}{aJwj zO-k9Bimuvrd)RUZC1dvS_&v-sP#sD$1U~(4kOV4ZG519&^Vp&b%&eM{^F3}^2xU=# z>i{w53hfP@A6S6D+^V}BO-n7WmZF7F2=rSUMj8#OHu{3Io!&+YVLf~+^!mMnTDjp% zc#6abP|L`K7+MgwGw9adHE?sL>&U7vNh_Y~(cvdY?EPFN4B=nRaPx)O({ZRafMP(b zm-)3aN$HupB#oqY-2*BQAM+TYOPP5!p8yhR#6vIvdC@V5rCo|ik68oCt7!kOsdb2v z{KZ@Y%P}XQD?Z)lBQq`vJsm9;gD`*)YkJp-SP%ypz@m3@OoJW~QzskX=^KDGq}W8e zMqg4s4R>(~cHC0HEwz|9LgNrOgYTmW;&SfmKv)XQLT98IVelE-$N6%@+qX|Q=HM*#V>|5}ye-;(k=hcwg^YchZPT`#7keaNaGeMV!jXPp8QgMS*I z>);4~;B>bDV{!4{I-;;iMR1{*e;{DeusR;^`=bc;x*uUMjfN0R5wRT{` zq5-bh0_Z~xNK(|;a12U$xvi96(Ei6-%6;`$0 zakCG){+P2LdJPkF;Bat)w~J5{Ng*4@4Fr6k73T3vjQIa*+7j~<*agZhJM-{g8s~|f zGWTsRTe&2O;x#C!p;=R;TH78 zscBOG+V5&U4x7)#lu9$JM?mQ2&Pj&TlK3XT4%;<9*t$?06_nbjo7D@LBQj#= zIdCv&rZJ7`VgqZ}_ldjn10s;JBTdM+a+7G{s&bHu1PZlsaW$ud6l|fm zkK!UUz@kA?E=YvLIjDSBKe>-UHV9#$QzgdOW8XIPAmd;_liZhEstFW_TMx0FDV(AG z-yD_uK?Rj6D9xAyDQa&(cAsGoKTBc>%OmwIZh5+?BpojINE8yn%;|cehzbvM4gZkk zbzS+JAo+eS@lbt+i7awj>s5*gZ#7xq%J~3PQ4wj~7;mrLXo^>_GB_~BI;Y#_NLW8t z63zfOK9p=d-SZj6sf{yMEU*n;5~%#|h|3za|A(=6iqa&CvURi4S!vs>v~63Jwr$(C 
zZQHhO+y2vb-s*GDIivgDe(0CEV?;dcmxvMZ#aeSN#9QsxCz3gDee~|z1sFOaiPyIb z>#NxN47@ecy&-1xM0xe@@-Je^s-|nMa;&u`#-Ba64bt_Bdj~D7zIR^P$qqOuL#?-~ zGiVN?WGjympMcI*~SZbv=_VVH|DMWKD5AtTe9Z2CmC3d{Ad66h03yK9;s=VO3=aghkk#Gle*Y+u_EPksN0bacJS2qK5NBFBchh(bW5Md957~M6ocCS&4 z2pnfW4)_wAxXrB_q$BGtkYWGBo#>5&>Ur2l=59Y-(j9B(^plV@8%P7}=|4(v}3c%o6; z1=uXuo94sn$T5#QsqtP;@wd|_J}r0DW_rbgxFta`#<`IdEwdAXFKzwf8X#1vU|iu* zo3NhVW`WA=%1CU+-u-|IsJ+Tvw~g~(q(^uDnX~N+daXYQUuXvI-ory-wE>#G^jW(C z29F1k`Nl8tvF(mBbZ7GUKOv^M>Y3XuddYv8nl_T*O^*?)Bl*UghG`JqHGK!~>=zCO z<8KfN`)E%a_>BWEtW#8-R`Jm_Yi_s$HDcVh$D$LDF}7E1n+w#fpvO`)bQXZ zAf2A=T}E<#G=tN?bH{0tE37S!{v^ehM6J*?yB4v)^FOMp;L~2^X%}bvippL zOV;P$1voPpU}z&T5SnJub)UBMmlC?Qz)gyaxcwxwg44F%xrrcTeVV#EgzkS;b*(m( zAgZl=+j9_GAA|Kvsf45t*EdbDp?MGn>xM3~-4ev7pRK*`FUN8%zqpsaFOb*T?%}Zj zTqZposLg?{Sl*?VY*5`)*2XjIA1Qicwo{en*^*fTo3Gj!Npim9k;R8^5llfPI`7zpHpx?o@ye zHaj-3icY+r<}Tm#F8Qk3PK#7Ovo*~4ei%6?G<+Y5kruI3pe=-ap1z!V$bPAzE z0r=8(Zp&3z&%(O+8aBo95c+_7xHWX@h8PB0+l=K>D z*;9gkKuCldUH|`l#QZ-^mSJRO`Y(K>xgA5@8vV~uSOx%6d8I>wM7zC{t7-J~sEFD@Jp!Zs1Rde;z=LUDfDK@d5aNNAQ4g2t%SC3`ax6Ed#kwot`e9`#i!%b zzSk8;MKkd`KK<$Wd~==r(edf96Ifui{ko!J03K5Z3$p)1;QaNL>#LO$$oFIS_J!~M zw*Nz$%d6}2tTOWtLCpyHEFL^#0A4R$-v)#3A>m;ZvZ=UnfdM zJYPUDk5gwJ1B6HGm9uwn*V{({q`u}?OaB;Yz->j){oG}n0WKBN4Vsc|8CuDPjcLDgM_1^H&DDe*yNw7o5XdT38SAvsri z#`J0(MbY)-x2fno)9op~2hh6UEw>+V7Z}1D7Jr@zFl)+T4Tz)wyuyh8Up54Td^>Vo z`)k4&c^n5OucpSNvN$qrS3}sA1KM-H!Z$=ok^8kKObBskX4mS0QH9C<*}#zqb&n9; zc%H2^t_FD@nTP!bP34eT1ZX@%=V(cK`Fi#c5CeUVuzKfLhVK!ET9Ib~=iP5P6u!lv z7B^4F6E8yjU|~76O$R$^LD%dZeKHy?36!tvB`%R!hXLUW?)aT#>>GsqRqCIaEKj)z ze8S4%Bl*;vOaT>Zb@)Ga+OWa(jNMv^m$1jQlr3lewjoBxU(aS-5a3+U^Q)Hr+gER! 
ztEqqaeZ`!!R|DN6e%==`Je#({#Q>NZpZo=Oi*)`e<|`f5a*aAir*NoAKVKJ>CVh1Han3ap=^o zjoxhU%C;$U*`JwtmVnUh|%%6wG`>3i|T>Vu+8}&d*5rq%%v^6EaK6#@jp>cDS8q zRXS?B-RsG4q~_B1mcza`e0nq0Ke{B%=G6=D9P$MhPwpBw-d(tIB-%ok+5ihb2^RRu zN?~pIklLEuj8$$tHKyx(mutJO=fZdrYyKBC%-N)|iPWDKF{_3mSyb*WV!gIfFWTC( z32*7Qo|d2K`m!)y7c1!Q5wR09Mhk)Vjt|q#d%7t?J8s3V(_4PJGwDaqW9g?HiO2nz zJ+&gy@UkOE>(XLyWGWidtx!#M;#Wa2AXuTtDfukzZAxGD0$RU zQyty|o!#qbYf3>^ES*^{G!(@)m#RXgauJSNyc_`d1B9KotKMLg zob@lFvW`N@jak>}c7uZtCG)u1=!v??#`LI7`9CiN09P+3H5RF5!VbfW2KK{)3BS|0 zP`zQ5^H2khQhIgcnO@3E=irva>Jj=$p0)>9(L1MH7n8j;-{%9ZwYHW>H+l`n+dHM@ z7^0sixxJ@4sDW+c$$krO%ckh##Xsj7raikz8nhgUuDwlnLZW($r7?y_~nDIH*stv49TlnL~GhkgP+>gAsmN)jx`8UY8GQ$T-ytPUt0RR!R^GM}s2v zi(^p~mD@u6G8j`<8A*~t5J25j6AIHnW}e$S%TuJa?V64$Sn7d6a?jRqm6GopK+r^4 zx;Uo~D99&t4qtH*7bVme2*U$M1olgCfO3RrR_8_lDThU^S%c9@ZU%>oBj(l)l#$P+ z6-R8a(;pPh1)o5Y3L*CAN1u>9;XtC)sKzA$Zw{7J11*5hhomKfMr9ML-(W>WD}pt8 ztjdJ)ptZFofh(q$6o9U7=9TA~QDdHE5#}2JkNa(8n>Nbg zD?*usRFqFc;i;ylALP~PAMD>xny5m?ZSa^;7OuA}ZLFa5P8<1(mWmK85x21yk!0Qs z8zL`Fj4rx5QI6wwx5T%sXVJgj0*py9!%VMxlcixA62 z27@JeVhUn;k$n#=j!Deui2exbDig8N+=418p__HQ{Y5WzC|G#8dc2IJ3KquB8g6V8 zSu`XU)&8l9Yg=1hXrU}zNZtdk0Ks3_nj$UUB}syC)~N(N!Va5}lJ%-->6xanL@Vh6 zX`{v9D1sPaA$uO23XyY?-;xUla}#Nwm!y^*MG4=bR*f&#X{a{%%3S9rht_xV2L#nB ze-4@u)k-%>vq%lv&W7A?wP1Cy1m91lK}8#ST-t{Ab#(V$!xq+Jv@qW& zSv9FSdaBYebZ*n5xov<`9n(zOWoBSY{t(OCNYrbz;yq2KVdfdgrm;NCd{>uruO)S= zI2{qmxuvC&2aQJl*>TXwOi0AvTHl64sR{OPb6!k%i;RNh0oH&J>IqSRbvUZGMOndk zS~cXK%t)2~`Y;FPF}uZCNTV=j()j#lzn6H*X28{gzA6XnESdPv1G!Z^(t%t6@;7F*@~QU*`;uI1;Qh#Hb0mH-4T0rQ zh{yx^r_q~&>#gh^LvdFIGW~4~+2RsT!{XyT4=4AdO8V=rvlN-QOPU{7qA48e+VDj- ztv-D#ytBmT3kdFC@aNU}s{h{{WD}}r?}x92PPNf!9jbWO!LQjB^PPYzB|R=QBLzle z2cyLHFJ)_Y3ttIg9m1qaG$!2*NW*;p`Zkt1PL@UfxXW=Q=c+XwI+Z`k(VKH&8UUc- zCG?88R;GbpNNkiu`V5V5r!TUwn`;xSNe2gMY=8*&X9;jKybaK!U`8c~s*#P64AN2V zzF2tjD-wm#sZeXM6blI` zJQ)NfAjsI02YWO7HR^vz^-_Zlh;MF+wN3V zkd`kiCrhBjDf`cLc%VEa8@h6-LSt2jFZIn(JEQxu;oeg=zi$e;T$}koJEw2pr%cMe 
z<~O7}_=2b3BZT@~4A|tZVxLbIwQ0v9$9%V$?S!L(Stm4YEuSW<*btXVe(;K30v~N+76Fh|wed>1LS3deZ_|$G70NvUdHC1e@_fif!5%+st_*pOS zS>L%wp1#FL&*MxZooZXigHuq~=N`JXbSB%+@ef4pTtr9s>YMPu{|)l|Pcu~Lnf_~A zqUL7I)(GN1&)J?MP%~gh*$kFT3>9TYh*luJI^FWoIowkgv)# zrlYd*pJB&zOcdv77YGEVcH_bl9&5+9?bBi0mrxla?N^BIkNw+bZiw%{GM1gtLI#_! z2WdVKM`e5mJfPS2&BqPq`+f1a^2Yu3d^eTthE~_x6r}(5dK5ZlV~>C_&tr6dJ9Kgt z#TReq>mgu`Vr$%Ssk0Ye7r;gg2fM_wW~!BP2RuZ?1Q%=>cvdokOwAd2&TCT&zDNz4 zuJYP%Mz@rKxhMJ9+DSz@+n~<++MhvhBd+0e*7xvz?A+h&?;kPDbIuAchv^#Z+jyop zu~oma>8*+r)iSPeUA)h|hTf}AK_`W0zGk{UK-jT2+kBm$^<_6tiWQHcVxl`FFQ4{d z8{yx2Lkk;ZZ${w#omu_}`T11J5c=--%pMACo-Onbs4g$U=O=ggr#fwr$239re}|02 z&*75?v2{-4+5(E7N8XGws`+}hHBA~+xAHuxWbOv4VSJK5JT#_ee-6Nl8tcYmt7>klN?P1o+k2W&8;&ErMa)Lc^px&-<9htm=8 zy<52L%Y>z&7?jCujMnZlYQ&cJD6ZfseW`=t{~Q_&#Fr9X@_Rco3#a0_ ze(O_iASXx;xL1IVhbaOLwznU^kl-1ZIL(})cG#+ z;GPyy1C&`HXqxV#4Sgv(%D=jC?v`P0M$mIo{}DUgS0MEax19WWJQB)^#&$`aOaBwS z$+ds#m~amGbKAHDzE!{9Lnan_ zpd!=g!_{6I3cBA7^3z^)>~9XAtlwTS|@+ zP}Cqx30TAqv9VKB&vbfQD6is;Pc^k{@GNT^djp!!em?J-+3ubBvU}UVpBx6Gw66E$ zk)l~~s9gMMnbkse0!%ut9=oBp#Vv*YF^5w%%K!(^C9%)KbXLK2-Hz!85`)<+=tEZBJgy!>etcX z8ssD_#S7R&RxV@oyG4(b4i=Iq2tN+q^L{K)INIQ&Xc!Qy+IvBYCx-qN8K)YtG5tjK z6Zc+$SU{K_3D|G*3$6elhVlOm1-O<~gu*vaSY~!n{|gGHVoH>5ehH`4y0?PDQp402 zDyo z*>@0#jP*ssB73LFr(iJRK!J+pR~h9H=NCTl1`NGLp*Pn-3g-q9wS~bfZQ%@iSW{Aq zLmNF>r64_O?O2e=O6oVo{kFF8>y{2Xlkp(J;2Hfo4KS1GW(>?U(ux(B$92qH>ey#i z)f}dQT?*3E5zSO1U-iezlzO!9p%*S67oQ;@hAhTazNCKer_8Q5KVcb4`wu8MDyCRu z;Ui*^gi3XqzPK86p)nL@L7CioaLL z!h*?96Saw|$^Of6ds6OO(XZ66uIUeha+>-=CRYm+Sy@fTU37GG#O=X=90b z#yf#_JB|~#JkE$+Z%&fo8N9=7C+rP1f?7disj6|ek9Zw_=MmYcDZCdTgaCi+Cd7=x zqQ9g>Nrmqucj(8)KZ6w?KNyF}X(L)yT3)Rn`K*)Rc+;PAC5eor+?p^ip#}|OM+0Fr z8WO*6Dx<{{n?0{R_kW=P(O;MjWlCJ^9~3xpPC|>c$uM%JxNLZI>nTl$0BG}@QHfQv zfC;xTS=7Sk)XADu9gM7`f<8#xNAiys4dB_<(qfy2YauoVzujN#-rT?7m+A!dxgpE- zyNOxE8-Lr?!0f%vVGo)}`-wETWWy)$w72iYE<@mC-~59D333%29gY$afe&)ryD2RQ z1h}aLUJPEXv@Sx|Jb#Hyq1p)zGgj4Lmz5eJgS;t51Lc{rQ;`y0RHZis3iVS=eWoDsl;UelJOaLMs$lUL!5L%J_Bv<3>n7Rn%o-)Ru%@_msO~?=Y(NqDZzpn 
zl}-&qh4_XXY_dewC6qPr6Uu@D7U|^xQRyaDk>x?ufketF8Boh`Om7OM`G2phe^8L= z(r=DEz7p z0+0o=5fkn=0%Up$(oSN|gz{Cs)n;eEiTT+oqcEx;F^zQ;{ygqCrpqfW>R~w1aLR)aR(P;*wzSlP#1`$xnr;?mEQK~*f_BaQ-cNH|6raf!0ETWM;yj4GzN40Q z^xPxVwlx6F1)`Z_PS)g`Q@o8}{b4cPUw2IPLP|}$nNR`| zl5Xf7d)29a^OUI`R84H^+fDVbPE^YY=v`*p4mdWY7E2VFtig;9gkO47&(Z zC^9}&CXNawiz<%IicG7hu@4hNWI*QKpP z{*9!ss?WAojL0z5LE$oCPL$4bH-j4cP$-kF!?KKKajeJ0DORMOP1FUpzrxBF>La>=ebvKh<$+^N>!f0sU zY9lCvB1dj`Rtadefz`m_lg$h~*l)nJ%{uARCth#!VO^ut-x5TpG4YncGLQ09V>Ma0 zK(CHQlup=c7>|hTm)6{7T1jECJ~Pv#OFMDrn$dNiO3v`Y)pXFotwtXr-p4Mxjojlv~e~bH7{75cpX{PN% zPS*AK`AqFhR@SxY=(&w0B*)iE==oTAP6^j|YN8j%e-^rMF9B1tBSP{whXlI+EeWkT zvk`9R;N*Ar5Xf{tV76K29sdm>{0~zp7&#dJYf(gNGG==OaeKFRkJFFp>6(%NQjaTQ zRTlIJBxy~Q{M)|0cnUQ9e-zVF}9 zAGSvC-@C1~J3BryaEw!NM6Rj@J@$nHpTw!6FkQ4q2B_?X2;42yTyy}Q~ikMGPs$TX{Q9~;ThZt zaq-zy1b}>7dSm+VI^?F8^T;7>Xb+yzSrB_{q!WWep(tavmnEgCXRyPvKKK(X-3j&Y4?o zG-Sc$E+h|CxpJXl!#3BLcn3+og@a>&CQtBrIKfp>vxk?mbI=2vi>)SE6{}K#`>(X~ zAHsI$WeG+6l{n(^P>pP>OXsh>Ny2q!)w&ee%0DBAX6{|E9_<8dO_zvvifpPRUY~A zzVm1}QJ4845qXz>^*|`9nSx&NaJ-C2oF}(_Os1)>7NFYH`O)}S(BObW*DQ=zvom~F zh9NH%`>euvRf3JmYUOxjH;6d)fV9?pP zDQ#!q*{!3f!5(6g33iqZYfSu6*U3>1PyFqw#Wh2@zK;3R)(5{vxK^&vLvmczgZomo zUr#y~Q%xa^Z#=w+^|{QcvmyG@=(z^CmaOf}=~|Geqs{Jme&4RkK7GiU zk;R63n$B;c8AJB9;v%=&XknM~T+0O?7e$0KX1$m(6EP}*>Qg_m*+Yy+<*t3|a}3C0 z>#A{_E$kh0p7~po;jY7!dG4J3QAT{>oZTs=P^Y!p{vVsoXs@>a+ic%M`_$z>HajKy zWa7zqlsP?=X*@l7TXiNTOo5Q)^{_JqL@p;o{OYmIuoY$5#$$>Vqn)Yx*fhEIsM$$B zQLE;IwZqpRMOqoW=ADv*X)M}{_r_*g zGE9F%Z^Q^$^09L?o_EmVD61cvoz~Uip3@xEH)zeKgDG5+t#(X#;R%+MAH@@?!Mto> z>$YJn4pwn~<5B}W?Y(B*9RJ&Bq^YrZ1=qyZNg2Va@#Feh0qjb2gT20v7Gbot<^~7& zYje;VooMpanu5lmRls#s4?9O_*&2n8Ya`^t`(|@Ut*S{TZ#ZL9sWa0%Gbu8#Wt{7? 
zZT!i#72n3U+uQTw`T39r#Z$;ippUg&NEyfT(+14&p`}Nc!IufeY~;^hOIVEdr&gMT zzvO)miaf;xj+h=WJG(6e(vbSX85Xl)VVvE)h|0pk#mWI;p3B^n8VE98zs)jeQALX; zKQCUx*R%zV{K(WV1PrvJEUSKnN?KzOWd%0|js#GU0^uTBL)z6_Y1?E-fWcrkc^I^e zFy>2SG8AVld$JRar0s46Z=yVPon5vAH!70%6;-P(eI}f5u|5a^h;-;Vh~^*dV0?kb zy0A)qArrnI9Y^+T4Z>(THjn$u%(b_yT+F4>K$8g8_T8|lQF)spFobF$BU83VrEitR zIuBS?^NUSREC5Gax~Dq31lap-lLG?kE>jTa7Q;PIn$V8vMeCZDSdC3tnT?6a2&On- z7*XRZ`cSEHsx`U8FM$50Nfz4Nni$ZS`ze}Z=`DdxFDA49CI4kl`KX^)z@f~}NI4@o zyjCb`Kx}Ibu0H2wAU;bHL6}6ZLpImHo-TaQ9fkiXE(iv*GidSwPb!9fifRObWBPe6e& zK?)+EPN9)sBUv56OqMp&2^!5p%OIc@{;?;jb%XNQpC)Q-gz(!^aYCOz>AN_@4t}Nw zwTd6AJe**PfsGT!PW7%PD=);vWeH2ULHXmh8YUvmm}-O`LdE%hLKJvX(6V-oAi;!r zMQD;?#kal|zTEKQvY ztd77{3DxTh;xR`n0lQDged?Y+0n+@~k@JsWFNy}w_B1-e9v}r0!jlk-ADMPQ5<)pa zu)vzNRk$?&e&xtitt(+$g?YEpX^S)G;u=0f`A zy^zjDl&ayG9*!{o0x}PLJOE)Z%EJ7~??}+^a|*<(44aTrEq*M;^O;OuenwjSnhkvDbxf|@X2advn+dI^&J2cV+6!FyE`k&Es=DLG#9&XS&Y8_gS z3ub2qvYKZ*r6(dNWC>y$0BYo1A(&>G(5_Y0XqX$dXEPI-Z0&KB zt@JUhCn;nJkdHan1ycFaV^hW#q<2uSM_JB&WEENvlZCq8!G-kj@4mNOhzGHzMKX)BzCjmt0r}x;WTI`xmbTA9XpHEl^!Hvrg zNz-%($3!}`|5=!bbS%*9igbo1MY^>9Uy<7OXsMdy_vP6k)uP)uUOm?ShCY0dC;4wa z)c>am15E!PcZZf#EJ-VD&r5BYBhaLmB#9pA+7sZ?$p6z%#)>!%R_`>)7hgX1q5^N( z>BUHoEAm;N`>R^`P9{Q`CwvXy3r$_;70-$vxp;rKc%J(A6S{n00H*HqJ$7;TyXh0~ zI2n$du;P8#O2xOXGpTw28>$pHMd+$kzMmF1julE*GmAP~o@Wt~ z+*LElCX7N^0o7$Ry`>-bTk0Md41;S5R#Y9$lmk?cB4D~P6#Z1oQWr^;|YWJj`@_JlL9Iqg4tFOFc#;QR8@GYOl9{G z15K73ZOmE>rCsHXM%lVD)Ao_^cyv5=70vIfQ1pN; z{&H}RRK!1!GnIvBY}&iS!%(ZplL$Fk+_{GrBCSiIj1Rupx#9p#9}#xQutZjy;|sB_7v90N^(qj`k(V`P-R)>bhvw+!qiyL^&?!Y z)fc)=w*(B_5XpOGQ#mi==pwHm+d#O|l(@$B^ld|~m2zA%vhZAme|4?x)3M$(5jk+D@r;xjP{4N3{qS;HWw#(Wsa{ByeH)3{V{0SnR_;DIpwX`$ z?dOHsM(wr?J`pT%v$yUbe3G4??Md2AIq00Um}sb!nq9V2Y8W-AhzdSG&PcMQa0Ra3 z46GG19nriqiw^AScAxK5&|_otb)2*}s4c5`@SvBwyV-eZM%M;xmAp9TJTy3o3E7#B zXGMC)uHwJVhuwQA=ZY!6T=CR5t=#0OT)alazqrPCZe*KF-TRq}HkhYsgWK?yMUv2ZsxBaiStxKsn#Ql8XPO_U#m;xPIrHl7; zIFp^CGU1N$U-^Z@l}rG~Haz1%RUUpfmviB3i8a585@uY|*g$G8mQg?t(M-A*Or!6q 
zzKe?|Orx%-Z=$}-xm+wBhhqj)8}Dowzs|4P?(awwtlh z7g*8j`K`wY`f62(((Y={Ugd^ZqnES}?h@MA@<{-6nv}b=whk?3?)uX3&;B* z&PVU)b^nIMSx#bI%DS~+&4yF5eo7t9+jVj1j#AQgo>j&wS-npjP$_5E^yOhUev|v< zIF0bi3j-X|iCa13f=cRXt6<9>bnxjWd?~&xUzQI?I{y{K0h*F;d{%&I_a_{pz_{@j zp29~_UpWQG=obq)&G5x)M3zn5Vq$~w z!!WYr3?cZ)1GM~1WHC`r`w^JJ$oH8$4$#E?T~HZR(zg4baY8b;dKKh!`g6qubuzNr zJR}f11V0#{4JacOy5yQlhy0F~p=`btqyPu=G`-6TZ5SZ6U0nwg%E;m9RsX7L=UGBO z2X%(XF9D{%>z8cd;re}PzYtPOIlMr`n?VM9m8P1W~l?FIli{2^%Pvy<*@E5c5K6Gp*Dt_r?j-r~2OT`g* zidN~!M+EmnCUlHc4+0=n2B4r0KPaYQx4?i%&yYeui!HVgam#5@Y1L45lA76F;<(Tpv<@ld(R2rJp$?j>J>nEw)S$o||G<)fPeYT=UWcuO6QzYwCsAWd z(kfHT%4!IRMpCKyp^ct)AaVUY8!zr)CGS&I4l|RGBse2o`8o|sOVo&lkwx;^&MLqN zNlTI(=~4arq^Usx)z2=PcqCW?O*%uSv*DwEJ3&T_8OV%4>8j*yx(%;1h?yv=71)X} z1JutSr9q^1s9Gc}AV8^+-Ovt3CR+9-W}rtO?uFVOgHa8dAyQXH8!xVuktAXDBy~GP zBJON25D}$;9!M3EfV_$h5x`Mr)bCr@uaw;w0lZH!QqRB;pD(f(P@EO)XdELTy+u_4 zM!a^gk=@EXS2d4TR|6MznM91mP!56BASiLNCa;-oA1E3hVOO5->7N;tXC+f{$@*h!QCPP1x}VjG921JBI0 zu5zWhtv~!wl~hfF;!prrA9RU!PWDyEJ%z`N%_bnVKbG?f;wDaqefXBhc1pXw{Kq80 zYW57JN>jnQ%KPcoDAxMwEudv$O09SEkz7JRptjYoS^>oXkn=|VkW1> zEbUS%4Hgz8LBl6C*F?SP2F_i$lco=>RlzK=VTMr}Vu7T0Gc z7+K$=0@pMM(?&-jl-buPS~JwfG;9OZIoXm>*y*NNLL*j;E7r^P3zR7;tTjd28V9n3 z>+l%nN+T#u&O1>F%^Vkq*5DPMe;aGPO@b~+n2^TLH2XCtQx@M@D@x5Wartt8X<1WR zpdKG3YAka3|ICYi@uiu<5-!P+mJT8uV- z8y2nl28$xc-U2Qf+vi%1ovcO+v*WWvrA%T?Svr|AFhDcZNAK3_3;G&=%H3kP1(r9s z&gU0c&?z#Oly#XxXBx`iZ;$ZLnpjUewj$158alkfSj(2_-b1@vSfu-PabM3p&PL_r zb)Zsi{BK7}W5)6Bpw)1YONcc?#hS%muiyKJ+Ey?UVJ?a8Te}syPowNMJ}`~WA%P47 zZqpXotv9`vPJp$i&msML9FrTaU0D-`hh&Jq^`2yk4}`P*6t=uJT=G+1A*?(%+Zf6k zIWc~wAgyj&C-$4`-&NgNH1m)(-+1hbj~6s`7!m( z)UE15!+yl>;1+6i{}NtFQByh=x|0^_&7{ltwOAp!MCVOFDL78SPn2rQz!dkl*2DRf z387J8*vQV+YOI~|sB1Rn$JG6gtr8$wy^_P9INie#t!EL z!ugfW*#_G~12c1MKJD~Pel99+?1Vw?&ABU;EnyGl8Q*R*b_0^s6-?`&T^>gb@aUD0 zdqdfev_;;NrdW=v|8tl-pOr!&)%Fgc{QN(s{#`v~DgBpxUi2?q*JoR_Xd@IM zK-;-L9iNAE+dn$rZDzqn51z-Z_*Tq7=QJX$K{oFXH+;^u{=VNXZveTDkE=e+0pG(dx6K~F2=Xw}!nm#IE9@LrrS0DCXR 
zgPn2-abZC7^-#C!D;cxC3sf_$Q#>|PO!8+MfAaZzLlhHES`he}<;0VwTh0s37`tM7z z)+ixnmyoqpr&9g_u&ft#pHAz?2=^WngZe{lkt&B1AsO(Pm#FM@F?O)(HQYHPYJG;LYO2#uYe`XSH>Q0 zTPKiTVQ%#;dFv^%MU$a|bVqZC8FS^+T@9tG)RgpCTwo)qxe^Bd2A8~P#@g=Y8{S%j zD{v%4w3uxxKR&%ePi#BL$?K6tjq{v=Q){LX`RZkr>AmOwdeJz>RsZb4=P#NL!caf+ zd(69xiY5_Kqn0G8iIoCj59mzJ3BIQK*kiZ}{NtErJIMd6Fik)1Q^N`j)5L?HUqw7r z7c(_`i6He?Q-@i9_7Y_h!6T$i4ksTrgSISf=VcObI2I~`^`M1O=4$hM%%1*7F?xm% zM%U4#1@W^lxLykc#2d@i&A9uc?r92>VVg_Tvo(GJm4R%c6D_7$b~TdaZ|%Q$Ux z!p$eWB*LQp>T~VYLZ}9=G+fG7MeO5D&1sGZm^b386|^h*W0FS-o3{wG2h;}#!fstS zdnjtzT>4oRy65DPf>8C$Go|N2r6~f05c1OPnBj$<#J~z1{JoB$t>L%q6BPXDgS8l+ z+l4QEKY)yZ3uN#z5Qyz3Ol;6aBn-6L-h}?dIy@~^iDdCotY^O;}j{6RL>R?PSh1hb*T*rWv%O4}I% z6RI2jLI@%Swl#B4B*(Q;!F8NG;ZP2YL4-|##351%>TA?p7)Jbkp{VB+w9@kU0PsZ+!r2(c ztEd|gV;50lWL=?4^|fC`PI>6rj6rqbNc?FwEPuAgBGL6wsQ;&LB1A$`aF;XCx>ush4r*N^Xfh2 zur;AU@hD_7#f)%mM!-&=ko2Pm^lJYwhJBC_$@X!yYZ%-|x>=LCqQt#FJQN24N}enu zkwQ$!Q2v)e)C1JAtO6FtE4PD&=TbjO8=;}KojZ)2=q5}Qg4K@Sk*b08(P55&{^s>9C%fX>v(-qIg|-%NiV zBjQj_jO0v{CP8bMAkGRNJsQq2^>EfbG*O_~Y~J^3*MZB*U-0KFjJeGtx8XSrehY4X zD0QuHQW0D>v6{pGd&n{Z1h4!#WmLd!)f-IA?!_6GECJT3dV0;QJk*g)_kXq$d?)j! 
z@G&K40Km(veChWPJdi@)Ija4F?&}tL`^R~SCNuJ--j})3B)fxMu!EaM6PttqGq#D0 zGtYnrP|jLXs6`SaBxbWzRhHmk=|79Eg-nB1;_7oXi=5C3h7X1-9pXGwYh4r*E1Dr^ z`ji?}hV;G#{JM-w1x$mRXoS(#$xq;+v1+(XWBcrUVtOHj*`$Uc>e!H9i7T%@JN-u6 z4;H!qd<$GR<@JO@%@HHR?3cd&SYZ9bajqJnafD{jLDDYLmg?OQ?3w5yd-7R{&9r)Uj^ap74NkO$hw2%k3a2 z{J~4?zgg+_F|`bD4uSfL7>`sie-}*7D~UrS&dY4+we%ce(9duNS6^x|I(&{JCK+=v zL;jRF>x>SiQA897q9f^q-VDJ6V)V%}Wa;}1jnRJ~at_`feIZUJ-b7(wnLa{CP7~2$ zrb1PZUNYxDPqdh9I|D5y=f+b+&Z+igR&fnfrU}DTzz?33^+?*sW{rHA^cWYO zlaaok)tDzGBTNr`vcHsi67kn56uXAs$NC#!nHO@%vJ^+F?pN^E@yvm=6JpVCGZguY zm*hiG1F}lpUrlL#tiYz=?)<$|V5^e)D7T7`DdSOj*_)9<1k_?n>k)}~2Ut#~){Yj8 zq`&(ayafTsWzs3J^>=rqCOX_mZ&y&po;s1(fhPC)mOJ*0C-Uxb2k1W0NE?B*s6?-0 z`JQ3-neNmz0RjTdwMT4G29lCC2BDuLN-Fbs)IQP?n8!CV9=8t&r%hV0Gt&s4yj~u- zIJT!Eygpinq7r5qd55gI5nO zZ6dQE@zznFx9U-ZL(ThFEi>D-IUcr}Gwr=5B|{mfH5a4)GRcHKBwYNa0`$aWNperV zv3wiAX5D0R-AO@LTp=9Np17KY-SE_Szj%vy<*ZyTW@y6(an$@jjGaS}s6m^h%eHMB zw`|+CZQHhO+qP}n<}LG9)zsgMiRhVL^fn_G8Q-_bljpqWsr^bd&$*IW z*fKdm^6sM=Cb-@K`3|e zd#(MVUm1cYtPD`EMeZ9Nw3`jQuT(>Ow>|y3SKjtJT{EU=%&_eXBjZ4~VJl_pomwCHw(S#7;go;Cd3+v+*$smgcMw{6i_va+^R=CH+# zNlom!i<5B&60=(m-2~)&Xcxm$jLf1^NU1~po1`cSz8Ta-;Om9lnQ2Jb`*g>NHhz+N zW@At>3&|3Rs-oF+s9HC(QcJ8Sq`8kix1aFY!neKQv=?R3r?51IowRliJp_+ zs)swa34_~$%-S{x_ewG!2zF@2L9OHdSwkJ&=-Wv;CsJ6nuRD8=ctZM8)(oAeBDPK< z)?_{)SRG_hc~wEc?^+(65m3%?Ub>$vd?+ZB@8~YLF2A=7RT{jO6Pc|`I+CgC^V@L= zK2(G3ge|ZW$MV&aR3aTja=)05{xthvq9LH;*!&qs56qEq)88(7ONE6G^AWW(syKjJ zxOHH%-vbN3xwPqa6@T>8Vl5)Udnt(KiX*h0!OX@&FmQ1u zT_h++NlanOMy4?ExSu|Zy)nFPuFPwjt$}c+)=MI*Jlrx>YD2$&0Ax(~Mj26Pb1`bb2C2Bfr90=!M>+02C9yI;XybQwA~FhWK82#rDHbl{c@Hc$opX&7?P+9$vw|ZR zxEJCy!%kYANm1zT-#co)G}Y+B$k1ivuI?#PZeFw%I$ny3SFnh7oW5w8DOnLyp^t(q zhR_tqtz08T#y)OVTiBh_iBoSNGfF<{47O(DcWYC}BZ^;K7FrgTy?ly z*q#}@(E$4#KRMQlyz9LZ+-!B>cEr7LZ8`z`^9Z+sof7>zntxL?N}rzZ4YdRtvlSW1sTO|qEnDpCqpUtU4Wif58qKgGfcvYKM8F{ z+4!B>vF6$P98!@*RJjj?F)eVGTawPbZHh7Gd(FXD+rj5bZ++wkeTG@`dLNIcDs6v%uQ3M>9tHbI7FzeDO2M3V zw(0Xo*!AzM+Y!LRucJ~at(4{xyZ{dKiI3iDq&|*nj@RFTZ#SYdrK*Ts$}Fr_!AZ7( 
z?6;xuDKFwkZBOS@$5a_xH6wk6&7&_$kw}pm)}w)@LyGT5 zneARp*xbS%r)MHS(A2zNM1Yu_ct^egL$y_$O0|7TQh%{8Umx<@xFe)9gofYoR)kYT zJp!S+{>s0#HFrTfyoV2erhQAD?#xt@0^Wpid;Nn5>lR)Mu^n0iJh|plj^Zv4=n*rR z#6EqVoy=WctnZHA5^}&b$%RsTNbu_%L5x9xvSHI4U3bKZR^yiD@4 z#Zy^VMm*M=46XpQN_6GqLP6laMF2awr0w^W43ne7TBcI zYQr1m5{>4Az2t$pw83c`O2M2O@u4>L!ND5vU26;DGmp6maI4Kxv8L^^X)YCP58Rdn zDeYjrp*_5k0jkqbIMLg)b1O)cl2_2uZdm7!!5kyRoB5w96Y}5x1n!drdLPk+J0Oy*?S}RmuA9h?P3Q%Epu~A>Mggs{PI!q zdZEzPMW6Ty)Nm~=MLC$^d+S?yjM%dsCnA5yow9*vyX{GO%@YuoN6)AC4UK>}A^-nB zC$ju+3u#yx{`+*Mj!x8ZE0WJ&y@NCUU&8%Az=J_)Q@1>1um|A8Ye53^#@C-AlTFt+ z9-YR^c4gbHtT%y`v7Z{PtgrjIi@hYut8Ft8Q=6Nn zMw-a$Prb4|zwvLck5}c>(T_*FFuyPLjZTVPUyba#pBK`r@B6Qv9wAe<{cKTWVP1~!}f;zw9m}bkKWRox{gvD+iELhcZbIp#jDHvmQGSI z=AH(8nH}A@)1jCh`^!`excl45)r<*kE!kR?wWD`?BevW&F22~aqnNY%?j3iQTzI$B z5NerJ^jf^{?;9yspZm|YrN^s_vN=1a)E-}+4B1?1S3Iu;l#MICvb>xhxtEXEiixYj z>*AB=xofk%7d33I-s{ZA6IOcbi^&hi_Y?1q>8#fB-8_XCwm3Yi*|g{>ayeYx)64}S zpI5p(Io*@0@5-jq2$=qrFQ+{}-S+R%R;;f~?>?`~y<;&soZ6HIe7!!-JD#DK)wP+= zz#{vqEZNybi2=SHss8*lZ;-t z=W;!%`jS-beLdYB81yWjMoDcdjGCN#Gu}CE4X_f@qWZfQ3O*Nl$K*=Vb>xgP%VCLK z8NNh>`hc{k^GwZ!MXy5oTA79T8669}kUoq&+Q$Px3gmqeXf&SL|6%~GeZK!G(Nm2d zASXRZ$A#@V#UvauaFRcvD6A# zRLex7PXzq0dw`87sNdusPnT5_Soq~3`hOg%%d2kZb$ac=S-}sj1ss5lOB3|>o%jw{ z#RyG3Krj^F(FrS##A%DO(Fx@h)KYXm`Qh|W;^GZJK-eQQEZ)~b>xXYK>Ee8YZ&nvj z-ihQwVy(@6V5b3%S^T@F?tK2Er1`rBih%YcYb z zcLeyCs*KIvZ@o2Mp_>@#5f;n=j7i!25tQR+b+TT$s~wskrdYLELDnS!;A~Ot`Lk{4 zgh)uty&hKIGM^S4qfj&g84!fQs0TsQr!-;ClAwF5?P#kiDP1DcV9}8-RdkV6$ zk=hyBsfED}~=f z;eCO?+g{)9J_K0O9Y;eTcXJ(7N-`RJLOu^CBGR5{u@ju#gcu-;i#TxBj5(^=!5NlG>jy`-=g)!d=ckq zojxUcy?H|%6-djQm>QYN-^4h09k7SBqoKWxG9MD85p4y(G z@g|H3=K@_h@@Fd7@W7!6Lhejy<-FQ$yuepbp@V`SAClx$ul%^kP)DFbV~`sHD8>6M zGXP0IS8$+89UeCC2$;I!B(_pLfO@F4w2rXS^xNQhs@z^{9a0jjqlz*JOZOC@WIwzH zwy^zpZBuQ8YAI}Hk&veiT=(-eA7ul0is{n41%N7AKmfE72xhMVs78L`k(F`=Z%pnJ|O(nI3BFRhu&RMGI0Dd`MklG`~e@v~PWd!(!OzqY4MowXXTTMLs2^jmHh6yExnlSeGsyE4ZS_G`(@|7fSnhuv}92`K!jbf 
zkITm6aIu9xPp_(_vHe;^RtlCVC=hUYy^eI+rji_c>!OOK2>{YQqiAbZ8Q`%%?h>Wh92aa&PID2Jco7eQ(*zFD z9ywHcoqfenrr+XSNvYi`L=JbspkRG#@h##fKBy@HhOjL>dZ)DpAUXQa2YWDhGx=K> zNgqfDd=*4vfiTZ;too}DyRFCb+lfT(8nst_qCQVIytI?xN9)uL)};%`MrrS1ObxdV zbYjyxD_rlQMd(LGM4zn(2o4LyncT_P!d<&;Atx*coBpMN8*3*W5(f&BkI0F;@OKcn z3W$6T1aQgCAnjSQz(g=MGp2D3KJOF&jv{#IUI0mnpn>|HYl_i4gNwt-7qgXJl-Lbv zm;og>H&{H2ClTOoV@4epl~^Pq!$(q-XsDIRl!;3Bf;SiHt|VqL6&K}uEG{&3V3MoQ zJ@G1+<=H3o1T3fvUlPuJ@C>os^K#o*ZZ-i87}c-=Rh7pPOPqy`Yt3>7z7X!HsFS0y z(uqgJD%>NNHl1?U6=h{&6fyN&_*D|V08sgXapk0W?wvK~;Z-l2W{5l5u95aieJ#gu z%ZDS&Rob0|rH}d{DrElk#LKWC8+I;aOCL*hs7h8INYzD97Ue{|@bh-rELUXgRIwf% z)u)B-(3S2}_Ia-~7b)%=l~j0uN=ZjPLfmf6_2N}T+ySfh5_=|I?Q!a&xmZIjC(bOw z6DbhU6#m0@{Uk|hkeE=wfc>bOV#0B=MrK#$Eew`3J*a-#LVRmKGB*OCm2+SbL3 z9Z2zLv3}O3-x+XHiga{OtcO`*M+U4w5rjWhX!wT~ot|)`L80Qm^ANeAy%`WJAm7Df zPUkEli`q}4p&*~9CE%Jg0K1d?Td)HM#MY*<0bw*ig>5RHos%4+NXIZ-bm$z1cnL79HbiVG1i6@Fd*fbhWAYLLq=Mn zA${`QXg)*W&;z2K$KG`J1G!a_IC9jQQA1hkaR$L@qD4K3&c-37lZh@|;@(YjI)VrC zeAF?T_7D8eAUkU;`lc;Z3tM!6%V{`KSN{Hp68J2$M+Q8HYEzH455C}#P_62@e{-0! 
zRfAMqQwD|oRW^}`?zQsQ@cBi>6GD?;H`Z)thLk*2a;7Vkr_}gOfB=jL1)3wnam5`( zE5A*XUAC{poUv4c8)Vx{L}$1YwV8SMRwwV2o>`77W?Wmf&S#|_)Htu4(?)v6G|J>b zGm(ILSFn&-e6br<{LxKVtu4=I)Rh`Yu5d6~!H5$e_6g`ou)6?f>& zFUXV-5aRn3xB>{w4z1&Z<0Ow}!;|UQhjksp+gBL@ot<)ABCx|sN6mc3DzIX~SVauj zJUg#9IkHfpj#IkjTZk3$i)x;JQ4HdkVmO)pX@s>-{Gwopk%YFu3|4^6pa|UC<#9ei6cQI&rMOo#TMX7P2qS>`xt0aB zW>)}ZjfLYq?=4K_5r%g+r!1w4z9|&VM|>&Pn4yLjuQrfwd^y>yp6jm$zqPzI#rf1PiP4j_P8;ir7&P1&|5SEYe zU3mw-f)^M0JMfl%sSTvtwWYeUAKnMWV!DxXV%YG{4_rLQ5KIB)QlFHl>gA?<9?$m&vDm3zw2~tt({-4|JLMObWlc~ z(?)8(+Uzub;_&zA`TU%IvGjB%85%~`2^rPZb@ZQKhS0C;tF7=hGDditA8RWvrc7Gv z8d@nCA0OwaNm~B^2@y489_4#tIl3tHG{O(wDuY%x@3O;IOoN}5xlz8xG z*V%76us88I3v|_W^-QNMlCHvewbNQo{$17DoM;`*gWnHsBGOmy96?*L`55RX&l-Cu zupaI8`Ap$q`+4_ydw2Z%_sn^DeexQ0F;_A1F}p?N^?vja^>sIQed3qKT9;L8n{~zL z{Vw++bz(cSW6xwK^ITIgDP^^bM>DBaHC5I}w{@@<;!@&ebsO3{>AEP{*Y=OQyX_|O z!v^NdJgeKa)dyQs1D4(viVf4AtJ|Fy;$vj^b@QO4ri!!e-@x$P6ui*i6)pNF&qf&nCQ8Sf7?R&ZCnKquF*YJpIghnh!(gCnam4{1I!cdVGg=hON*% zvLBxbGW>H_W%sbXUzMT~57UgFfdR<+R`c@$LoiMV7{!{Sn9L&c=}31|Q;9?iC@-br z3|`S2zcV8yvCe$^oOuC}m};}!a|cEGu_(~-0PZ_XE0X*|BoHhWoHk5Qx%^^5ASfW+ z<_og)Xgv`aGjSU9Af^N|Nf&XV_+YCDfI>qQ1cdFcM4X5g_*lSYs z{yB`IbKJ~x&nrpb+CC7+s}z-G+m?{B(=uNZRQ2Q^ebh0L(MM)*Z)qYT$+dOk4>y+S zV(st44sI7&u3wf~;%NL9-U+$PoL4qSMBI^kCWlnlHXA)+CIe+9tDd?M+J!s8S8PxB zBMb%jrVi`vPXR~=H0utZ>tG44%ns*dLb~T{cj$X)>$T8Dh?6=F680j*MGp{r19NFPH^N3VSbh#pxIrI7d2hrc<4&8_6^bqZzzT<|WKh+UFuw}-)q>~CV@+I5 z!4%tbX)=A-i1bahAk$j_np-_?0OG$i-Boa_9mi{7K*8 z_oF2dKe?Laf7H*Vr^cwE^xC=d(6(oCkGTy%zMreU0zl*o-mM`wcq*j`tqi8?ddE`; z*SGZp?C-~sFl-spn#FU=k~7ltuAk}hj08F%E4dK^U z-6ghWJ|nZX9x_;t)C^O9qc+bgKz6ZEUeDa0wpT2bn(^8c&p0yp=ALe-3irla!l0C#QxSevSJ zBw6FoupUW)CZ4{=-r+V8A#Mvbx^l7eQdDkrrWY7~!2H*)tD8>~M`^tJG%AmjI0v_p zo7qhW)8^_dexU*o+t4Rvm^nyA3iBSSI3|I!6@CEdhRLIo3c(OB%qeA!i=c(y2MoaQ zj+E>NP!+h);0X@mLwr6mzw?h`wx!Nrc8-G{ug=0xA#P78j$rodKfYi{P|=>}tg5Cihw4PfNB#){qJN6$6}6kXrGb6k5Nwz&O5$;a ztE>@Pu(Fpo<=*VC39^~70+gV!nh@S0P?7;wffOVytVK$t{-*jW*65EV!q?AL1Y&P5 
z0`#i&Sjp=|kz&NxRl%eF)CR7Q4!LPO5&?)%Rh$g!VeV@-M*dhF87QL*&1>DXF^02 zujP|=J!9fBjIyFQq5=|=A1q1aShZH9c~B6jK~ue3?hgSwNpQyMD%ffOC|?EJ~N`i{c1^43b<$uyOknxqi23c5<=A%yLH2zi}>FV~GYGyM2RD zql>4kEi@TMhzSrut;AnHe*RNouu$}%EK>I%f6bF@S={$=1G0cDj3~v47H&D!eE#Zf zsY%DUgQr_lB!?VgAdIQ_;hUaytY@>Ga zRjg&SjnGSDMzCziWuPih4J>2Blu1okj6sXJ1URNpX-@{@2X_bN|H`psCI`RG37-wK zRF#_?iwYcZ&7Vh8Y3+M8hqbiH>u^YjoEsntt8Z`tHfhK)V5@=pJ1I-mWW)jIB_nDi z!#^RtejvN+10H%$zM0i24Xuqhoix9%)>NF>I+4N z1sH-ubJgJK2^&C3(~c4v-{O*NXSt+Hq4gp6BZfdR0WO-qViBLr*_1#%#d0HM4ABmREuR#jDUv`E{< zJ8eyw{uRRy=uiAwe3WLS09d4u$G@Taw}Y#JsTRZ0B_S<(@e{4k;xDGBvoWVi;o^mmrGb&FMKowcyRXGM(!8T^ zZym%|Zf}exYMRO+#MXN$^c`+1y7l z)N?_gy`W`lv`_&6q%|l%Hc)&@aIa!XILXp5pBPjTo(64kT~-CM6@Hob20E)_Ph3?J z*PBmCcLAVNhdJ1aP6Nqc_cB$ONgim>DKz6qg{EpkKgk~(>_4s$+EVM>OkUls5(5tk zKRMTl_!CTZNZ5{8!sJz)NnKTlV9dbAgeJMFo&5?+nFGi#M$eLZvHa}iz9bn_sVVMo zsevaYA&X1Wt+gkIF(nA5D&xVQCck;pL}mF&N8^0;F_&Hk)v*LXsSSgX2gtLYaoI#iD!8+9 zEB*Xmu(?g4R!|gKSn-w^^f4u2DaS>?hG>%jfR^vk6QS+HIw`y$!u>=iLU~Ap9#{<# zvEI{ZR@>#I#4RmmB-?5 zh|i=5b62RF60LB;tR>yP2vPF|H!dFt>vw?O8xdulOaY znqJqIvDE913&?~67NWt?GT|eYcb07$;kMD^5~xi z60W2$#&^zb$Jao9FQ@(1J}n{5$sh^PpTJSsKVd{j=miautS8l(PaaI2n!lC8etEt> zKd`wfPLy%5|s#lo6-!?_!3u*{bZ+lQ%aTYS(?X)o$!rt07me)5wNkF=th# zL_!w4nPwgKOvmAkx$R|35XV{;Z0O~BhqPHtCd$gNI@$9_L%Sz$szr}2O)c3d)75%q zC+%WV;k)|pZX*lo-RP6nT@My|HeAqA8D{v%xK}Ig=tVCl&Ralgje2KEKbz8R$Mgo?r0n*Hg!Y zb4t&T%OdXGMfHw^DEh{)fjzhSMegqwCK)eFI;!PK9bH}-QN%2KoHtSnADpr|6f z^WIMQ+6T#t1@806+8f2_X=dmhUmC|z0B2wyoW%mO&2zXIynMpuB_FOsWQJxebr>w{ z^Tk_Y=Y+WX8hk`t{ahv$zhv?yS=nN<#l2rfIp<}s zWR*9!_gsu1y4O66Wsokb+>~QAdH+&VH}qP2rZ<3AB!Df-oXJgxHDE%(8;zXalPH;~ zi8_@TMqk;dAPAgc=Y@rV9l#6JB5h(JTyp@hVu0#Q=St#)R&FGG%l0TN0bBC=?V9FU z2wqUPMX1mp%caT#!FiXvS|T%lK3`9Kiyh)06iI-PL10mmYodpSGir(gbij14zv&Mp zX+JGl#<;fD*>`KVa1uUlYyiGm_7k=cL`-)h>xv zrk;|{%PXDk*o(sU=)4TNw{jCv42LEv-7(=vtHqq~^e&>DNIS5aS9C#o-i!!o#a0km zvk@2;nZ-_6#m~f%a}KxM1k*!sPz35z7|mgZjiDYvI5K?R6aoug9mBwBS?42FaXNzn zHEMw%cn#Z3F_UWxt5^+MqP!18aY9FpKUE{tG?OMi7rUYcYjTCl{fO?xzdcbXTdSnW 
z*p?{5(v?lfgxx_A*!ejmBqvwUH=@qy=}6Lx_+qYh)MA{n zjq9E_z+V7*IKPDl9r;&hU%If1!|1qRHgG5c`G^w$Vr(uIms~J~-y4?QFXoGp^7L1| z5yQ1A=|gId)rkR;eH_ZMlF9ma4T257X0%%!1yqNcV5beIqyKR7yJAg3+|419LKL6 ze*5*6X`!zZ56)S8%^x;uJhv!7of)iwqGuF#f|1IbBz^&uuxN%!a#VG$-+_`TBtZ}! zHyS#R=kQb1juY&(@5o5qUj970KUY9qZ{MMOivR|1AWsJ)X#&X)npkY<53~@^l26YD zXyDwl89Exkm>q@lBQob8;VMZk^L-s>96XZVzvO`| zFdEG5)i~bk>y8i*NtBP~9mro13O@8pLyYkU6Ax_-m&;3!)0B-91pYQRllw5BChh@n zupfa6cfXwVSwXOM-4OzmSg^%fyO}~)+;3;#Xe>QN&r<{M@1AIq3ezh4}@DWbKq9$_1-gs5;oD!Iye>~_r@IN1b+wZzlVflWI*^2EdP z9YEzJGuR!3@p$6J(s`?wx-mxttW@D{o!V1E_iw|_C^%)Zef=R_qS#lNo3Ph#J1L$g z$19+?O2o}r*g#f=%!DEcu{SImitH);S<>YA(pcY&g;nUX-XL=&1XrvSE2%_CJduIF z*t;RLMYt{H*2;o6pvJk@)vO8XkZ|xPd^bA6#;nRP-h~izJAMN!z z7oej5j~_S&dvE}}JDm-U*NKR*Et|Nca%)6mFiQ}xmv-^jR<8WX7!h87YIiHiD?!ln zB=oy5{gcp39|CFW$cV_C#!*Z*0+ly}WuXiVs5bNPrbI8ylw(uDyeBT{>+$ zPwJoHO(|DwSmZO5C-M|y^jpb-wVgli20=xm@eUk$?}+PKyFH0tVw~pPoKHX}VJ>(o1k!fg=ZpuZwrtfc*6_S-vt*Ynk7H= zpIAWz#`Wfr%fa8j`)!}tCPq)*brbpe(N|!d+nsvchowyjT;oH6u`t^bs7k_dST?%c z*8hp*Bw7LU5`iK@HKr1Qx%uYL_wmachwwivwxSDkSJ;LIk4ppZaRq4Gtc9$9GghI}`A z$2V?~D7ymdTCoX(H*S8`i;*eQ<}t1G&g&&+5p#&5uonIkXvD5q9)`&<*vqrpYmP!N zh!L3+vOwYS!k>?BsZ9%RhoDqn8~`J6SUMBY0iCZh%vL_%JVCyg{t!h0T43GY}y| zvs^a0^OWBTE-QXldhC^ep2S_?9jVd9OEg}#SIxv!H|rNDeP^&F(Nkk9O#X$|4iH6_ zXHA7GFIl@aUp^}@_Mvz)4xZ?eaI^TH5X+wa{`2wgU_7Lu@^9%POQW20MxNf7Ls7_U zM}tpc6y@&d>o?`ibHs5DqbIvQq0Ay1m}9p%9rrf+-go5WW_}jPDAC4aK5aF&) z+u0Zgo84pEQOD(a+}ZY{C-)leue9UZjSh2fW1BT4vzn=%*oHZ))MLx6Jz6oZ{mcPh zdhWu5c{bdl)l-?xyuMLC{rYIg=F*Kj`Q4Q2)R)yT&2<{$Mm7bl7w7kT?k<~u_rE{n z_vm#trj^%UaY+}i855l>Exi-FzZ%w%?~BLl=hv65>a5B9=#JU_N%>OjtnI*cRb5kd zkukZgc|TWwt6e#1()&CpusPE4c?8hvl4!CEoAq7YfUAk2-jXFnpS>CGgzrnYR^tN{RI>Siv zIwIGz-EycQ!@r1hFw5d>HpqYkHJ(>nPBlYI@uh1rGUrI9BP0Q&RK1s|zk66W>@*$< zX7kXJ5vlvfZGF$$YzE0~Y@8rE(RLtma@}=KpI;K|ao3Izlf#>iC~Ax`eMV@=IT=f- zDhce|USI!q;-fH79wj?q|9tm(62P=12t~0Id4qe2AO z#*Me(!jb{vG;r_w0dZ7tnrTh9J1T*0jm)2&U_l-qAz*$;V}KAm3I%a^AxDrbat|p+ 
zNZlxeaV&2Xr|K3Jq+^gW8@*S&32{WmaQQ@4~8|u`oj|(`v{1>(caUKh3IreXP{F&35A}HdN}3aegG=>8XBuxi9z_NYKxsb- zAt7A_V373wRUx=%A=Yvwy0PYLhBnb6bp>88Im$WQB+)M)KiQLK{I z?8b7h7IVxG6b42&-t37-`wP(6;tNG1v%9iXm;O8*q>$UW(}0C$%X)Jn!g%!%NHzdn zVV56{2jPe71twT1jjX&UWemVEtc2*uE@MX0T#6qKwFMwN>E?U6^NkExP{`QWihG@16#}bU@5+5Sm`LQRHHpgy(Sj<7J_ z1n$!jE^duazyX>k>QYcqnwqvfE}7NHg*ad7R=VCMkmPWZ>=!2iTzJ6X1k?UCV&5!_ zNA9$&?9TA_HrkydmH^ucAuRxsY%8cJyreu69e5Z|!P-RErrZ*TpF+gDS{zJ-$%ROw zfsX|D@7)9#>QX!JMm!b%L)LcQb+7nnJmFt^dx7c$sWYi5(qJy3goGCB2gBA(1nhS> z(3X5lzIFEeO@2fO$q!BFcp#XGC*CI4W^DHqXoc+GO{J$l+7M1=EjhVwwv*IEYl5R956o~P?=WPV*_WeUl z+~fOO8M+3tgI)1!87JKi;H0uO_e@|6_jK>ZMwcMxX-S?)sWQPS8?%&=W&D2xd1x?T7Cj*xfL8IHXat_E@?#JO zl0~70h{nG0#t*kjDVXwL*4a_ojJ`UC0R9rA9&FmBgSCR1e{So1!ILqS&lW6cz>x+JWMg9sdIf)ry*nc01udf|73ixJ_F3T!`%{J}28;=&b`mHC0EI&p(QC=X?dUz3 zW17uIz7Vt1)qe2rRvL`wN(>GXk!lGqKRI)tzVG}uo10+Zu;gX-_bPTH29_|WtIQSe z;&_?O(Ao4Ez5i%b5xv4BvYT}_6Vvr{XAbXt~Z=D_iO-1?jF z4`xM_x5VPL%VG0S7tH9)y)|{qp^UBPt~e)m!e)hq7eCM9fa-(!3tc5F{vlq8Ob`)A zV}YO<>SH2j?X!AV3b5Cx7o7#edZ|OWDH{EW4yv53T*2k3iT|tJwbwJRf5W@~(e9Va z;{cU-;`L16Oehn_G@KQStx?z^FH@1ck!#_RYxePil;G5}L_Vv2o?$k`_;5jN%vp&$g8pw-#TE9Pyf|ek3vT_Fw}MKKq)%!iQ&!qG>QICt0|29MNYobL zHWf*i=Gy$2ADg+2kp@JEpYYneh#bL5)pRq04lD{TNa1zKk;Cp7*3eTKfdwF#kCHM% zdLF`mqhsxz1|m6CnTj8!Z2zO$HxMf=)LEGeLbLhqzc`*Ui45pGjf4VyUYCQEX#h5U zQ@k=he`#I&g~)*dwUm1QDUH=^$I(a5 z!wv%o#jEGyBJ>;1BeF9oyd%wt%)o$@Wm?=b+U^??t6EbITlTo-5esFWirj=wrMJ-9 zy^yOIqwf~Y?dV)b#T8rv>a|dU#X{4Tdr2tk) z$F4d1^+5*3dKuu@t`K`P@!gizxWeP>Nf*8Ik`J0Qv`z{H)5DEYcp(u#V=A7(;}H#m z37=hFl+sQU0&=N~C{rd5QROoM`qjhrYlsZN6|)nn`#|gAtiB*h4J0tO(zPbaGsNPp z1kSd!uzagKLz&iT(5pgt-6g+8-Ew%19sfG4Q6>kZjsy3u;UpRVV>PY9 zP8aY{5}qLCgL>qy7me=+cHEzOJdD~6S08C}53nlip}d~0i`Z>1pLf#rDWgkyQeSh# zYcm(&-PT+AmuygelhawJ|9_}^3%IJbu3wl2>F(Hsw7_Q5Al==KbayG;4N^)gN~cJ7 zNr`kwcefxZC3l18e2<=od#>;A{qA|63(8)5t~KWzW6U}KYs4IDg{Yq+2XvVBG=4>K zZrYa5Vw}E9VU1(?WznZ2bFkvXXdNsj`lwnquztSJ{{R0;1IfCQc*#kAfmllf`vDB&H8x`gg~6^y1_*()Y+`U01#7 z;>Hqnz!>NtwW5p&0zpC2@zZGg7LKGr@>;LeeGYr?s5&+F%#&n>WZ{dZ7+_AV8J=LX 
zO4zrI32}N=I%)6mF@;OzdcwW{rkhu>H33>C$9nEWA$e#OzX-QdRRa18_j25^%#$EB z`j_$}seqvF8xCpQW#0x#O84)O4qXS-rSbk@Oj0#H&))_4a4%*)URH&1QjAOP^Ld>8 zz#Zc}BBIoQRJso8G5j$mZRODMjR?23r}6%P=8-$q`!i8@gz$r&a< zQwF}IyFJbN$S(%Rk1P6ExI1|dwQdZFzl2=2exLt#WKi|3w2-9bly&Y{srhJZt!z4~ zWzS=@_Qc$qH+sNl{&nc~h}seSn|!GqE8XFV!GgKtaJ4y&9Wv3Y*%qROC%)cD=Q^B> zqp7`is_kj-9yOg*h=KWzOnH~by245{6LR``Uh%x<@(c^uS2I8Tb@uvdF=Z|ft)AeG z-T+IVwYMdGLvv_f2CKf%P|2GTQC?p>5x?V#OYmCN{`_*aWF$z4IenV;5Ms+T{&Slm zgtO|-s?i1>B)6~!7GKp9E8{KrysD>PAT7GRlEboW*G<@B)pg+J<=?-dhF)#W*>8#U zvxZ9Yb4UB6Q<@VWY8tY}-%>1fJ)J7FE%5wRMWHRps?PBjOeckb!Emxsihzv0t+Nul z4L*toqc2o+6Z0{stP+7*T^8?cK#v=)!t&xiPNW5Y40R0Q=EdD-?60hS*MyMD;kC0# z8N;kRs+(gkBmYr-Wmi-kGpC;NjTjf)W2=XYSx)8$ABge2d1G_JV|3k(2}A@r!;cT# z_3=L|dklGj#weE4NSUzhagkBIN*ONAm@a8^dmYMkG#@w+Z+Gc+2@`ADz!^n5&xNql zY+CUwPY()&FMrsMtfqYXycLIfc*qTs+S(3{=mUH*oG=pBg%*smEyRb6qJo4=h|{bs zzjcGK>JK)I!ldY+JT}}i#&~OS@Rp?mvAw}>0Z=O0jlon5pIGAKu#MFDOMU9ID1($r zESxgcn4(XHCPL;(1fKu_vDBp9gM#TzPu$;<_?5j3Wx(ZPB6nE3{J!|4@9glg^FB@K zm}%C?*C^>JMkq_7&u-IV51j|r?+Zgzix*eNfff%tE|DJgput6mL0dtL+_b#^UiH~cuL zvolXgR6=@}DB7z4M5K%D-Lc#f>VfM!p6yRjorHAN2+IS4*=E^4EDtmGR=P>}>8mex zjrHKvy0#8!^Fo9ax0&LgYHpvh+*BVDWRgN>7B;O_&YXthVyBZ*4-@C<$ypaQz_L@6 zDP3yuj=EpRp_wJ?v17go(;wy{l@v`_Pz`zxg#{5RMVY##MAWQt3gs+{=SN`n>d100%Tr#ZOBJF*x{e9u6*Rjs_jA`eBAAj`@=8{0dW1# zK7Pmy%NaoKfe+`73OwS5r_(JSMXJ7&eE2K@>xtw=6B`>Mgs9P`WG%(k0W4vot#3;S1(DYrNG`OzYf;Z_g7DLI^+SAFtO{U4A}m`zj&`Hr?rJ}2 znnc+!m!m`F4FkWQ0s{SkGXg1ve&$06#S7r#wsLxglfQ;gCkNhx1d`e#~ z0&y((Ok$8K&9jvg}U0XFY>OSA%oVzM0vs_^0}DpygQHFd_3 zpxNj$PtkVI1yMy1BKRLlJ{VCz{z{z6=I;P;;|L{u!tnt&>&v!I8B2-C%XxL1cl1E) zx4b3}(>4NglN{N;vrW5z&%S}wrquaZT0sMiInC=sa$$?b1|`Yrm8>}v2=x;mmst1P z5E(_J@|3izU^K>HVI4PhfpL7USApaLQ6+EXSkUq26v$@MeM z`lj@JxlfN~pA5Pf3xgY4GJ_7^3fsTZZZ}bwjy;>YB(d$;K%P629vb={ZKtbI2efyu zg(~TsRjv5~+}9#}laGGBt$KZis~cavmx_rysKoYBpv@8-A#Mo%XXv%!S9t)q@) zg?+^QX3|lNEQm=Rj$5a$9pO{@c@p1>0bV28K*;1O<^xf$6`;`b#k;&!IZ1ErAmshAa>!hvuS_=e9g5YSK@J7crC zBGFoulA!m`i!_w8Gt`-!ipZDw9$WPG*ies{m)m?r6F)6ps3GUI%=cejL_dZsP`x-t 
z?x>RBsLo9C(^R)8j7!e49wlTs!HSd*0Uvxy&1 zWG7xryj_x4bRgh|bTQUE9fQYH@E`_y(E!zJk+w%&1PRHTLYqnX={|*N9h9x|R!TFJ z?PB56=olqwTsv9!CGw&iek}F`cz9r!eFO{oq(MBl1Anv%<37ZQ7ftp<=Dn4n*TGiO zz{uPa?;f_y(?h;!0eXVRLi4l*3h0!mRD~iF?nn?v)7kj$5o{!|(FV>-xsoR~>u?cL z!teT_uxyk!_s363>%ThfF{!m*u=9@dWILK|J|V9Hg3j0jYCmU%LuA&)C#D^Av@dE= zN&7o1WlL&^;=V=dg)MoQSt*X7p*zu71HbYD0@C3IN^Q;}CahM6RuLHbKx3`3DQLTe(xys9Mwyp7Mk5> z!pz|d99wO4f+)0foh!nzn7fS>z7L%5Cr82g`8S?B`|##aK|9T!5$UePUeH$?&MnYl z3ut0*HozrEoNyN;gSOFy0ubB*PcsonAum?YX9OU!UYyfMFHU+=y%-8ClRbH@cz)y( z&En1y&esq7U_is%C%bA2#>C4y>(jSDP1{Md5=wFDob&3_$rM9vDR;6Vdl@gF+5sQBFrBktwh4aYjfj)tZ&0GW#O^9+zv@O8& zdCH}t0@|73c*ZHw{@!<7bLpN@c=Qt5{b|;e4toH8Tk>iOy}@K#aFjz5s;w$FJd_wnllTa~KH zF(4xj^tcf21mohZU@E$*yFfUK{=PAW2B|ja8^Uw==Hp1SWGA$sv#h4LQTx+=v!M+H z!?VXwgOfFT=Oretu&;NW1tBh)bMbfs27>lSTu?_D*_8$oa zgm@fX&j}l6`ccyvWIqLYj;I9?gswF|5B9?y);&=MfI>(UG>QzCh3lM(TwoEvTWm<& z17SGV#YK2I2#q$=zHBAXvplQ1{Cd>8wJ|!FcLTZVhj;exMhVAnKd^@N*O!Ir=@fwj zS3W-iSCWS$A;B+JsRlvBlVqh0SenVfpRRoIr)!KGin1?s6KCc=f~^uonR3;_UJbk) zChKW|a4FeC6`phZvD(!IU)}GGlf}}J9aIBqV)3S3R07{U&nE=pZ}h)6niRN-UCu1& z3#dH@tzXEDR^OaH+WHO-Tdi%_YaL#{mr187=`P|a3uh<^yymy0mosYMFfUK@89*l4 zG1}FbsJY(GweQ`r+0;}^Os{2`;AeVoX8YwSFfw}1s^oD}k=9e2Bt(eR@2duV~m%2c%!E} zOd+Y0)gNmt3hgFX_b1-1O*_^linTm_G~vT(O-}vP)Y+D`B3Fm-$??v((_XD^Z>D}j zqIFAJ6*`uAXVoXuirh}^kglypxrL`7cy@!mFi7j9&*xVnCAR0D##V=Y)ZjSQ(*8p8 z!b4`E#SZ==B%o23%KK$&R>?yzCQixDRraKw=%}9(QXRg=2&5U|l z-Y9vpKMJ?-*qN&s`#G}Hht51ZjwHe)x#sBa>zOt>k)PF;29DuS8u;IobYGYeT_Ace znQ59lI7S7Ti|%_))JR88p&wK9_*11$!BL8MZHFd5XYmnNQm}mIwgt}w(g^L2AV`^Y zpBa9d&ZBdAHs|WB^&&*R8#SraGB;~F)rMoPzF4kk2$PnTNOC%rS`_)X3_8ky@8mpV z^yL>F8~!;z3L1%bp$$A)PO0hP1s3UIzKoA)+d|-`3+HUwv%dNz`8XrqP{&@%`9cay ztPTS%P5IG~ih4fL)LJzfaiASCNPds?#^_Q&!giuTAC~qZ`1EpjXe>m-I`k7R@g~w- z-$79XmE1Je6Boz7xKN|A zD1c0`^I!@qhxnA@xX4$mCD=ps2m>M2m_HzcNPF-F=uybmk)j+mxb?uTYtwH~1%ty# zq|N;>su)e@f%%xo8Kc=7c}I2I9X83@*OA8Sk_p-o2bOd!%3ZJKI88v0uQ$HSheju2 zJ<2ZbVuTo(Eo~|XA~2>Oeqbc zc!_ayqGkES);|}b)wkQjWjyz_<7KmL_WJ%4C}?)E=||?QLpyG}P=ffZdY>M-5pY@R 
z$yX`VW!8l+;-Xs^rr8D7XB@FHjN?>px_CTpyeVr>*mwgqUa+)U#1yYDrR$n_F zdYHwrPf4_&*Rf!V@2ewSS1xXvyV1;JZR`k61lcZk&ktfyCc0eRlkPSIf#l}8R6D0_ z)1Uj~Ul8Gy743FQVSj~>iiC?eZXJgdlLLf!`x(DSt&PIhT0^@|BYpZxbB4sbSaA}P zljH*+Z0971!yw2DV}_=xD4TqJH&J}nxFB<{(8eF~0f4;8yyi$?H7nIvi8n#ekTWP_o3mGvcX#BdiBG7Hl<-JP8Fx zyaL8WBp97Wxuf+R3&@M^hM~?kp=yqR%z^;0v;l{I>XGrMvKt`ZO#shPlz- zA4G6@3K1HsC8p(g2q*3;l@yr7Y_H3nRN6^1;np#)kxI5Gw(_Z40d$YG9f?`nxYfLco_igHDZSFp>r`w}-~7AYmyp6?oe7#wOm z7$E5vD4ep;H|NphHA=yBy$iBQNgBgXf zqShPr68Cfe-q&Wg;S=13k#NZ@emmV`ttC`uFP%z-akq|6mP0ci8?DDjdE%_W zL#VOK1ES&g%{au0ik~mCkC<@jnScW*aWx9(ed#<70a{~l*fg+dL&>Zdd6A2D8&S+< z@gb9mJcaX&^om{HMf|*k%i##8WVHD{9|Ld{QQzc2CFRF{Y*uSg2z>PRWEcC-2ugw& znz%?QLV%h3S8|Lz>nOCYz(mOul8m-zk(>$S2j-r9q5Pwg3Qb*Eh^Cmr#k3Gpp9Cks zk3oSp?8k&elM2GwPqx+hrb1>@EHnV1ZYiOfyJ#cTUhsp&FYaAGcPAG@oJ!z@i#Bo$ zTMj0FrTGR>;oq4Vv-#O=$=D5x8-$K9ruqW!Y?l1>%6qE!Y73>Y=tkKxWP>?Au1*>e zFC06)U>n16+QpVfEGjwuqB}G?RX3i(eRcn+ht(z&{)NZ3hhR|z1~puXX}h)Mv}t`h zx>@uYax%Ae>ZW%L1pV9RQ?7m=!fInu%e(qLMBk5J&K={Rok~x7j3DIA86<={8B(;; zKe~&ioWjFU+AM{9#tRY(d^G$O{tY2Z`2bcV1;rU|m6+>E4pZR*T6NvW><8&27<#J%`K z@DpsRN=$T^RvZG0UpTupEf>fLH@8TR-qb0CDTnBZ7WS1eDjghWiD;p@F)$0=q zKs=F8vJmQnYZWK?yhamT!#}=4-q5P3a8PWgetK=DO^MJHZW&*0)}%Cs3ehm!KOxXpduSQ8f5^sl~O! 
zo>1E6KO(2Sf)(o}Gp<%VKMD`CYshFFS(o~M|HmW}|x5V$uLr?uvW^tiU&tp%r zeLfhO@I7vM0lf0pQc`>vKr*fLERG725*Bi6d4;VG$S-4o&_O_i5+OFPPW4$Gu!H}# z?J+4eX-*rY1IMzJ3z-lGx?{GGiSZlfXP+@>H+y8o?5^jP0?xXJaRd}T1d)7s5~{ea zG3DnYq8(55N*Il)CopLLLO5bNj;f^ynY=514ofP)Ca$T_JJhJsLVjqzC6Cwtl&G;A zJ;#G7*JPA?AglDxB^QZ^?$cE!2m3zn^i%;IF;gcrOx(;^3m*bMw`poM5ye44G|JbH;r{D|@I{L|k zZZ^qYuI%>H9rP;qla$(NMvl7J&l@c^imaT8{Gkqql4bQ;OX*zdoTzSyYg%+5UhePP zoVMfI7CZy_Xi@46CuQX%T%TaYXeAuui&VOwo}{c=@*jEjlCSUBYd6MK*sFKey&LEf z;rQm#*-2d#yltWn4aH(VLZCF|VIZN(b+j|HHqrY&sRfekrOVPi=H_oc!-j+Pm$bKj z^%*N7AiDR2#%3M5sTo_n5G_^p%0^JIk6;=;B3;L|FYUN=b(BQI&J*ElgWW(Ev-Lg# zsSukUG8Dq&=a%0?L@##IJTxo<9SPZ#98!k}6y*2yKPM?%gnDR<1L_)^zSoDZLZGIR zuMFxu{yw??d2f7sPNB2-6Ua|o&I{ma3-I*=iGv@}CBfJg@bv*QByW%jtZ;c!kco~k zw2czcqGHRNnhVU?{m<=h?Z{q;zYo<2&Wg_exQOiGrt+-vdvWM~#BLGg_Q68vLAo#efbNIq`xoG^HrEa|%oySYY>yv7@%8 zvtTrzN=qbxONdRyOO1ruPle+w3euEx0R>?sAThS6S!}TaY*@MPxYATHkSns^%jbJ_ zz9}w#Ru9&U>I|yT4zK6ZA5RBGHFoD#`P=ke3NPmNVy<}U7MWi2q`GLcJOWgY8)%G& zRB&U;0U}$Ko8wCl+XeuU4_x9CW)4C@%(zHJR?&+(M9;?&&8X{Nwb%q&6E?GbABI{r z4|$6v@v3@987B>PhbIl`<=2F7vywZRAD|)%%5EA)hlkD7oNR-&_#_TVCWx|)d)u!F z7?J=wQUk&!t%XL?dnpmBUxGjmm4{}@UL|flg9*`OYeW{RBSdI>qakG0472ts9Ug1x z?)5Jx-gU%%a(!Wrys{%x-{5K9NbFJ;$)nd~WzjjjU8~Y3^EBfC;bleBd%cKe?{S1n zfdi%Bw5@6^ZY;g>GD|Pub*CQ;L85J#(vyA)Muvcs+DmxKCfdLot4z*74`qdIy!T(R z)c$?i{xoY<-5pE;%!-EQrijeSrq1@RPR6Fr06souMJIb>6;l_04zr@DIDlEz)XfFJ zEM*J6Q238ekv~4A^bncF+*~A7Tnt@IZ>vgx03ZMhKpBx)f*rtqd&JL=$SiJe=W=^i zoDFcBEebv>&H=dHXMB6a3E=$WhzoEVSH3Od25|pz1Y`k#|J}87+mf8AiKU^iy&FL1 zwvYqJ3gF~s1rs3x?!nZ~#Tftu{-~(p;^b=VqG;%JSIUCOtZHcF3}9A41T%bF2H?1( z6Wpe$or%4X6(YDP7QpQRn9l#pk^zDM;D2{K{Un?FB=X;o!ykhFRSv-4ki+e${R49N zWnTRM%=;fx>sQ`ce}nhinfMRz4qhGq$Q<}%IsGaJ&~M1$cJ=)Oa`?YE2Uzbw1CjYx z-r4@oytCc{$Un{dAM2bS{PPD)?G|W--xBm&;`Y(># zy|~2r8@&BPxb(|<{4er$FV=9~!`s~egM0epzRUVQfi?d%-az+a3HSe*H_(4RmVoZX z65xFb(_aP-^q&tVpnJgtc%K&ZSLXin(FAlangH)pbN{9k5CgnV zpZP1p{}9IfLwX(Cy?6w?Pr3PvAlUvPK>DWyaX)z7rwRR45dRQ({ZoRtWACpTA@Dx6 
z=})8lkJ~TXKLm9DlpyX0#rt%uzY5|%3>3khf&Z%SgYABRyiW=HEA#&lK>jxr%)9qd z{hQGPyiW`J3v=xM5a|7Pn7bFqSnt!q{>t1x1Z4jm=I#Y2*88-uzcP3G{Xg4YWxp42 zSnt!q{>t3{1mOJF3?KG;0fqHGjq9(>{pSP9-G0G;0~FT#w5Yo={+G$h@t+SU9QOmt zeVWlj;BU8suMD{Jq=7$f z8w7vcT>^I1|LK3fyY?rc-lD|6t@V5Q{n5vr+W@?I^mZt~A3yp4{U8JM;}XyhGVXL< z<-h6pw$g8O%=V*?JB3LZ!1kjLwjX`m-C~ph>_0AH|3L=(-6en1@vY?l_Z@>F;8_9+wT6RIj}PLqrD%I4*ng z4h|G?+_Cplg<6`OrU>3Il$Av^qjqOdqF(z44J98HcaOi{c=bS*(XkOF~ z76egG`46xZ{(;Y(-;B}2*7-IBEXGvgKsWl=5#DXR8AhNpQpRCuJ;Yv1W||J=sTi1u4kPa^eHb`C-R%iKFOx%h*mSXy<(nG zrqi#hd5)52FYokblskrqx1}VgehEoHDpU00-y>z5w^r64{QOD!Pn5H>-17V9uL%(G zo)&(>iNBKw*Do(Qk&EO-^q|$W{9$DPvo9?I0~W}ahI-v z)Pqh_Wi9jZKpZB%nR@+=nSr&q17BNyI-bN<9u{Wf8MNy}ylFwA-h1e}UAwks zY?M}0lPYKAM3|kRtM%ha4n3-D|B5$ww!kB({Mb_sKrRW$2vdY4C&9&FMOTKPyb@Cg zYcoRpW*$inB_M%(QQLq7179q9t*S-`^Pm{pe}6@-bmRfw@(T*#QdakQoZ9h^2BxNI ztlk#0? zeG-_-`TBVuaqTdcJU_A95jl1<0-cXS|BM#3q{EJ(9{lM}aw!bk5qj&c^3rAP)y39} zR!c&|^;hts2mD{fUl@^*EIt>P|B^C3_qs14MhJM)9{-Ikz5K>;bu#D~%G&B3GQbsaieQ&9jjfabD|^-#6Vt(QYabZ;t@UhqTB`?8QtHKg$TN z%O#v#P|g~b-WXF2yE7$Bj?8t=62P?dbJ_>YeXo4_u&JWM0!+5691(PkX14fzWBMk{ zjxR2D2}FHix;`jtuNT*WB>T2mL`~mh?9I}zJXjY}K?8~rewlJb*15X#-oh9(As3`E z)E>Luac`TlY0chPG@GD76_L}fJ;hm^ZPbcl8=9p7fJii-csv+8I5Qlg+F-_#hY=Du z4_Wu7F0W7uo2M%`FNCb1yW*2_a4+aOD%Z z8s9m*rs2xNC=yt*Bh%&9&t1K_4{W|SP!q!(QLn$;5Qj3X+ePnR6v5%-GvT-zG@KPM zQt)Js+~1%7=Kh_}SKtPU4LacCJ?rcCHt-wki=FG2Cuqs3O2x0TAT(a$cnP9%lr?@N z!E)X_zj1}4s@o1Rp`ntZ5S9@<@DmC`4oYrC5O9$L;jbd!Ftxi!h7HxTbn*@Wd9hhG zJlo#BIt$ux`kdA$Yrvuu3OC^H$LM8l23KnaIhAja$jpd2TXCa|-}adpn-4#v&u_!3 zp5#V-w}!+0%i6F;`hd{hD8{a%flv*U-e zsEoP5K=X-o>s>=zZYCYdsr45f4cn5DI%yspCR84j&N4JC!nQX@wu)z3OO;iQ?d9I; z10|)@{w%uGwZ?fpP{XL+r{%Cchk2V*MX_~L+)b@kCTgJlEwRt#DbxNBmlMY#jE}XU5e-Mw%<<&>xfm(;`@6_yL?CBjS1cjK8{hr%syWV_>}E5Zuqbq1tIH6G|J{{ zVq6)^CF4Fr9<JSIw9pwF^D_!6B0u#R5l_Z4s zOv#_LtG~Myv2y*Au&<^p6%W2SH-M)kfaz(wCo4O9=<&^amB}wI7x4!Ev2g^9es7`R zQ&8}2{0>CqP#~Aa@UhTK&IQ8S_LJut8X6!fo;saD!iQUM8WimdVNRh8bY(mi4O#?@ 
z^*oSB;>6@Xhr;Y+9r>i3Q?M9JrvcBw5`|_p4+23kdc`HrD#bPPcAJLUp~}CyiMFE%i^y#xy=Z6>hu_A1MYBG!2DE%^ zHUM96lQq z-8;TC_~P9A>E!1p<}Z3mGBdOK>IU%htX0_+J669)KTi?7oNb4+gA#?^8_xL};#m{0 z!JCX_4Vg}adj@M7%yo4YLnJWxCU3KXd+AKGAz3N$fe0((M-UNICQtp-HY#L>m%RFr z<)cZqrI=r{UL%;cO80|Er$#3e2##Ps?rqTXplbVyTFm-m@_QE-H~2ORjfEz3`V@{( z8jPmf=6~0pQOyex?iP10EmqzpeFs-Cu$Lf(T#W`l9hsn@*w)j;TQ%vWBGbqoJRz&m zL?nT0n-uy?G5#bDi78a^K`#fYCVktq{E1^){=rf95VAMjOy#@Q-2HMC3KU%P1Tvoq zcOZNa9HSGdG`w2mRH`gO?~!Q=F(x%*!cOI8sy3ReQ9bu`M(IlK0764L;qNL-Yn zO7T;!$>*3`kPc&?l#oiQ3LbM8w;;xb|-Djv)6YN`2y>dYw@2p!*C zG35mw))FmCU}K+;B%W-)b!wFF!oad6*NrkaX47{-Byo0rk~?hS)_Q7Gtr6RQ>6kpz zfVQ;gqD;B1cb*n#<4|*Ep;l`S(iNW#ctUFlL>M6Nif=4L$1GK~F1IDD(Nr_;FNufS z_e{tVtXySI8d(>tpQy!pDK7umzoy*|kzq1=6SZZFaVOl>V~qNW=Ml1c$AgM%s0}2W zRo~ZR6W&q#6%x@K&vRY&I(MQ{!n6x4k4I$fQ_r3);pCVyOShjfHj3hr(TcIY;B`7{ zh*IX=cVd^We>Z$n1Z(Qixi`dn2S>?i+8} zfz99s=dhoNJRhO<^0>bQZ1_YVuR$@>IDNFl>?G*+GmCQ3~4JSZw*y$0F9UOK9aorjaV3X%xMXYSBU?%|hoRcY{*?p$aPuAq$NuA?Y zTvSt)a@u7u?C;(}yeOmdG{Y#fsafV2w>)TWy7I?E~jC)B3d zD?a4&f!})O!WODTuQ)}j1wtX0)(~V-hDNY7u6vkUP4PuYeCUYOU#0_2$uQ_^3B9g- z5AD>Ah=$R4c-YX332Z2$SmXwjMj-xFX$zCSqN+kSU2d{LS@49ZFt0sB)7Gk4tQ0*LvdRo~BBgK|Q#rK%ixDN!!?Y84P|CLwVf2fjFw2mR`OiT9${$ z_be)Q%g|{xmvbr8$)dR8_*LjzER#K{kv9re^BKMqzBJ{c;rFba-*kI`IDx<5iLzAm zEDLhu4$hSy@|RvuK7{q5^BZ=+qrq%QkFl*i5AX;@+(_pJaoSBP%~OKO z&o#+IEn-_sIP6n6d{dNSzv3J}8qqFspc6CnIkYnKdYDU|5rHUISAK5K<~~9ffIm%~D^A@+=1A{-m=3EQSp?nT zjnX*%8{N{1!=eBh-;?kRg*QZUnVbua{%C>3$3ulHV|y9$-Sc_hA7Y_;NreyS>Dc91 z3qKAb7&F*@^6}L{g%JtNzRg!nfH{0d3McTu?}aNHu|I4LDqUmMfmB!rO@^4(BC@>{ z*W1sZs2!~a3h3MrU&+2+6E5v4fHAAZFx@b1MPR!0j`qi@8_Ppz#Q$bxf&8VjVhh7Aw`;zC zQM1Dq^$abFPJF~G_55a-xHtwL*k`mt4&|PYpbt5Sc9`BMuZ`vnMS9e-%7+0DP89g7Ho8qMgM!>dyOxz6~wC1Gw?lyk9gtzJ`K=v@e zI!Y!7T|e?f8Tis(W?nk9C%qr}($-UtUDPcBL^3hi&uGLCi=_C-48%Bj*?Pz!*1)DH z9lmsVlJBkGkwey6s%_W%1p}yOn|n=Yqz9ar=X>G6Z-(O)U3Nu(BVE0BC|boEtGu2&BOK`wRUc$}XKt#HePeDO%sj^YcXZ+UO&22Q zFLP8`$_|Dcx$!9C0^(8B*_jLW8VeX*SlP0`?~FE0())&uJf!N92{hco|CZThwz(9t 
zV2awPt@gTRG%Q@h__`Erb<9Vby-;+yMvLAfd3lVntFuoQ=b*PQ%8+`Zup0|qAChLa z-_6Dx*NHyLZEOwI%nY$|Js9*Qw@~5DAm7@C0jo-&=-bfkoZ7VfCc`$3VxuKTsdX!U zygp1;CmAxQ3ja4UV`+?!W0G zW##zAcdMo(;{;2NFzXr7?3d`3a3o8g(AjutuacjVv{C4v8`njX705#$j>-jJyAm1U zynbQQ$PxXe7RmdqTFZ^&{-KJ+g%UWo)A67QD8V=M&YeGBF^wWKzqn~wo}OLAblt2r zg796yTc-&zy!@SbbX7!udGQ?Xc+%l`;S>OVrwB1tF1&8%ZUyTu2^?c|t6Jlwj?y9< z9*~udYp`W_%P^rb2OfsQ_BxS8qXoe!*PrTR}~_9vx}3dq3tcjx9&Qw-*uAw>KE6Gle23Fpa-A5GD`eJ5y15{UK)Y! zVZ^GKuPP_JgTXGv7`|co!5%}aTn>}qLMxw{n~14*$-4)F3)lIC$U_80kL6e97=nmS zyW(vjr}IliEj2`T5E4%6Qu6#nY#ZVnPz41_IaLlo#qu7FOQ}Pg?}v$I+PCS`*faX) z>gb&Aikl(n4-p59jSE$yoO(-hc!iv=wiekbTcCP1uBXkY0OfZ6N`FQ|ZMSZE4Z2R!+38Bm<-#zg-ZO?H+mT8plb~za z*vQUB31&5uG2Q%iFT7XUj8c+bRbOY(-c;N&KuyB~%h)gP%^SlS^n zOM#KXF)|ewQ(JX#ToW8B)%^1W3l}&T2aaW#I|IPU ze1A@tpJvOyH~=@`YUFa4S#?_o4wc_hBWLL1WC>1MVq#%oVdLUrpzo@D|~aym! z{WZq*XRPc0#^)_U{n6>aHxC4|+{zlr!to>I#t9B71A%w(&>yBLm{PEb3giSc2)wmP z!6$F=n+3qhdRvj>&R)I!4rBqbfF&hn2bPDWo%wA}+K-|8vjmY@)Y8n%6r3S;n+pd{ zGGjLX>wmU;%Km!I|4ZM&+iL%v$^R1r;3)WyO5iy7ziP7yfxobUlLYUC#wPmHDe(3! 
zW^-_u9Q=p%_7CXia~yws0q-~YS7Ub-L~bL}%(si-HYxEgI}zOW-8=x#SaUYOU6l6r z5X=VW-DZH>Vs`MzgNOU>5^#RuU0D9MTnNBj!d9Xv~aoCTBh|6TdNqXAs~w_=7s;4f|jwdZvx&_HAuXZ11^ zghggOfsiG?316#C{wWN+$dnvSWR~o#a0%f)l&B=dBta~>PI(^uD2lC*;ZaD;+hz;` z(!-%amvCM3Tc@83nh$IXCKl@7IRoIdzs+)0)V-Ai-7HgWho*l?Ypyh=EB*JoIQQ?G zC)~dz!HcQL#Dm>>Ghnw~E8S7(k;(QLBkS2hbHLO8H!Cc0E9#ftA~fqsQ7NN=Wsve9XvYFNV+CatB3uU8{PtpAnog@bEd7k&!HO z5|(8x*}}(ZoX2r;YIKyvzYu1;615ywuQy@{hxxL!llEG)W<<2QDm>f6)C|U=n4f4i zP?f`nBe9ElqjUY^C6VKco|paWyY;B#W6z5Zonx|j%67i+sSJFM-KzbVbDUbLO+!)A zGp?=OtOqFcArwrc>^exz8DA~6>y~+ASw9ib(I_`~P-Q-x zQNi{m^kriknP7xJ#QES>CG6$R>B}`iola>R1<4)7QdBB+Ye0gc7i~5@ruSmv%~R=5 zD}AER`jYH>9(Fz(ebjjkl(I8+JBhK#-I~T7;79&HMR#)Fdv(Y zx8x4g5%3s(5>ETA2TewamEayGe$(FIVEJ_|smheE{t<*jeds%&f`wI)Uw2atEp_VI z#EL6SB?XloMQsR&V!`D<7fy2>Xq`f-x^-hr`Jx$5a}O7TgYdw;p+QGN*siu=$ghw0 zQW|s7mFRiT%%(G>&EB@bnX5oD%i}MFJ=x?_F_Fq@dAo0oEbZxR-_Yi4erSCWkR|Y@ zw-c9mOCR)#w&{6&?T2Q;%~x!Uc9e0aD6-_y!B|Ykb;%Rl73lGn4z|VeEOt?iMTWT< z=|!40g#E_;d@ZC?cr#J?x$pb?C=O&s@}o|CFxWUVV_#FG_d%EP%mQu364_eYLU~V5 z^!5A?cKMSP1LwZMnN$h&YoM-abIY#JaG$snaj&A+nPR*bl|yi;wDR((kZ{+VGXK-!3%AJljX4yh})qYwl!Oi8oj~7rIqv3Zu^@;Gjp( zgVfI7ZD4~6jIx5CHuS8=aK1qNDs0YXNl^Wz!tP=ARoI&*Z;bPi4h(F!;=wiQkEfp) z+CJH^D^w8$4D|J7V-RVK3zbL!hJ2)ykSvRN(oN>zbs>V6Vp1#E$I$WG##SDX_+aFz$7P#>k|I|#UNp3B2Mt<+ zryz4mV2pr+j&wHnEb3z?pjd6Oh{=qd?AnMJSx8rHY@ws!$x2i7JVt!q?bcI}q57nF z2urD}0rA7Vl>|+1o>;k%0 zm%#L%`T3LO@OO?!;IB>{Iq)I_2jgKppOlf6c92e%-C&7nEZ)@6#D80j!03ZIwo)A8 zMQq|2Vk4pn&dziZV9~eZM>iT*)E*;>VdF8X5NS3t-b*tw2COxfZG$5Fd$HvNZYD1! 
z5vuG;cl-M{9|~{*sB`b3_a|HJ@95?F1?1(Tgb-T+=+OI@5nh6ms(Q0GFGt!hZmfst z-4Ll>kdM7Ju#{vR6J!rw&Q&a;944wS=AhEIU7IC#bRd6kvXIL(<=xiae6+B<*tHXf zb~#BXaSvs`sZVkJ63Y{N{^t%m3cP%5EcmK3p|Fgmn~2RAI)%Y%sE{JMWOjc?`dNNQ zAs&<|lWqD^E*>|!RD2VeFy@E1uH#&_6!JOsB1MD{uir!3PpZ-12@Uis z2+1i6*}(u28aBW?E-*rZg3e^sPFSvQswk+6(#KgQKRkKM7yiIoNsLS!9Tw=FAqzJ) zoZu4~LFI)tz;wLc>d%42w3J&&s2{=x0_B-14ah4vcG=O7ZLLJrXSzPyS4}#uF6XXA zDeTRD4oH>Q>O)D}A1&~hC@Pal!bd?E7Yl`XQd_JF3qx1bJ&nHK^R~gzqSdpzR}u-G z>+#wf3CW2Sa|s9nao(io>6u^PTZlkS58Iks2rfPyE(RB+keIi8eOshQW4+fuX3GmX z)7MpM3>$l{q@36eQ_VdriwniiLhf-^go+t7!)dQP6(K_W|Qj&Q&b{adYY$Z1I8jf*`x*l)D$1 zNkD@5UIe6Nd)ssPrzuTmYJ4#dKM0%DK~Sh<;i@S_C9eb$VR}w=HL22M;9;~`v_K)H z5Mn+Byr?#55L^^(js@9g+wjz7#_H*06gR#4U+kR)SQXvc_oZ7plZX~2z z>5!C0N?N)UBt%j`x=|Wwqy$NUZ}#@_Jo+5Y^Ks7mp1NMIeO)kXX7=ngYi6xkbKm#> zcP~*`4c=vKNVMj_zJGCRRnLbQw;?4nE=EuNgZ?N9D<4-&eY(|~uE*VH!@>MnAKyZ_ zt+d=ljq=w!he|eJL5qCnh9way@OH9%i%NxCy!^7Ck85d#8=2yiM(hEP z@WU*UPyc3o90{WhcZ$=#A*%J#EEA~mfs6ztmN9$h|k2hQ7^Egug& z>J^~Yl-Mw(7U@gE5AZMoi6ZZ4XYkR^CLy%QG?Wd(B)doEFS;;0cluy4W8M+TJ)?MP zCt4_;1P>X(!>N(J`&{jqhG2`hwC<&!_`QO&7ag4g&@@3fn(ei%s|y==@6I@0HL~?_ z$(C8sE2pr$*SfeP(_h!d!0w<3&vh`+Sdg36B%O_!V0t46Qh&t3|>b` zzqahiXU-JVm5MbvJbs2}1;doXixzu6-fw0U;mST28Ki0H-W_MpB^a$pT0qMh81UVD zf!f`q!j@g5FHyItVM?C%2qVL0&LDnge`kMSU+CPcqUMU>@`rb@KkK@G$S5NoEpro! z6bP2)6=z~mP9t6rR2zeFy0nE!Z>5V`v_QTW5%hWwCff9cEY3qe$hS#WA5-&J-H1lX zqiPnBy5>a+J6Z!8yPx6{ACPXH2y+MdF;{m>u6@=%Y1>{Cl_k;^%t6xae}BL=E6X+Q5Wga>e??sXiz2RBS*~eH;rfFM83->SXN7N^pg;B{_yhz7u?~f7 zUm8#kjuRO~&8@?@wbaVM!1<&u|9y%!r10MP*o4Zi^2?qK?;FEXEj&2ZaZ)Wd$TdC11 zB18;74eM>-iYS^!C*a$Mhs`=JqayHe%?QKbe!^0nY7CX&FA{iAR3yvt!o%EM2*T;25Hg5n zDNQ(ePlfBvtWqu;q(dw!9wqAO7OXlzO}GCH?tUdtwTlMQJu3&tr4PRK8>U|(qu91w z;r8!gDt<(uC>tA!7a#E(#oM6@Jls(A|8jtpKJWZOh;Z96`qsRfoU#JCBW_X$Nm{hj zq|VBdYV;OP5we}vgluyG6hg}J+TOGj^vGjW&QJS?>@kl%F2VUze87to_Iu96eK)(| z-K0WSC`oAZ$iqbuE@Y!6EZrR+AG^(WDDh6BZyh{kY1lQgi_=m!5?v!yN-G?`m^Bic z=;!C_lWN#8jlQ5CkTa@7;oVwJcvp=&KkaRDCkK% z^Ulq3;e-3*Cf~TrxSiS|ig917)65cH? 
zUrb?1T?lE_p=syXRJer(=eilQ)NrS)IbuJWL;$zLopy2i0i?2jN2{Bf+6|@VMMU>949!J%g7gZ< z$UKu|Y(w`MEw?~o2_hpD$Ji0Fnft2+tFSk; z6m7stt=^-7zzEQtuTzD!@FUHn(rH|))7>GWbx9Z3 z{hT5HD4oLdY{UXGSVawjRDOc|HN9j8DeTBp;&`)TTL9lo)cq3u-MZ)epHZF7j&+qT zgq&|bFG!_VL>13f!1j3#i~b68L%Z#Ok>u7upowgb*FaaXp_%jRZ3ue$fxdp`PP)4l zJo9y*?uq2d!2>!r(-K|8vhd=eQO`<~H8 zUl*$+M=N9xI=5w|tFH~rBQDkbHUYu@iUYZNTuF4J$|G3X zH?0N?uxnZ(CQ=h zoXy6SI=cu<-#ITarD71lA45^8d0w=phGBJ=_uNLFJ4@3`G0r*vO=UH1;Q?Y)t*RI) zn*=-~-xG4a)-7{LR8I}_HyjBM^gC8hea`DIAOWk<)>XIQHO*+hG%f!#O$%U#yk1}b zC2IL4YWXE<`FDs~fY6xhby0S%ADrv%X>!VPj!d96z3g`U+nW(=PddBP~Q=a_R zA@3_0*U*_(__M2*>1rjcE8WGzs_plc55&Buo29`1ILDAZzh*AXY-Ox4BZ)Aby5?sGfei+i+H&rrA!c z%uh4)+L&Ho8*VI%&09^miVD&ew0xNelHZr`Om%9gDfFUyq5S;wO_rGL_SI8LTQM;P zv{mFggo`|=Sfo-D-qAXCwt3rDBgTco<@87@ZLKP)dRhEts3r8=k-i#QRx&$L4qGu< zW*6~0xC5%2jZ&7*EJmk|CfYFtl-eIYx$plzK^3s5Jg$x z#f~zJnMf>+IqDQ>^^(u*!>te>+x~@N1GXFVZDj**2dcFOZn(CLD>c2}(1%x+U@7De$r}|HmQ?dS- zLE+C_I9xx6HT=?3{nAtY%k)&N;KUo(m|$@IJW*ErF*fYt*}l7RHtv`0H(NKP%O;BN zsVpW@pxuIitZV_4$gL!bj>pkV4k1snc1J2oGK(`F4w1+P4^ZAV@SYZ&$SnSd!tS3L z0Mk=fd3%P83_4KW?XgsIOxKjJ;xZTBU1xj&T+%7IZ6+4YFI>s&(SF1Du(GS4$@UJP ze!{M~*C)}6km5bSThKcA3Gy8cbO1sIy|I6-7M4F1i$DI@fRom{t@L>Gh?gc9?Y7Ud z()swRt1>p??MuCH#=dwhDK^1!3&*2mSNCos!YwTLv=12&0d72Shbuu&E?pl`!dIAr zc=;ro6(18uS+x#!FDnk-op(wvxY@R4x}s;-3O{b5D@|@8-et$JrReZZ9S?KWb-gCv z2_ovr0cvMz{bLtY(K+cuYILl zk>li~ao2qlV{V_W&&DIZ9jCaONUklfC}js}X=@x(4?9xTr?iOEW{%7?7~MU)jPt=0 zxo$VTjLm_uj`60iHkQztFoX!PTh43R*)l~U{c0xJZ&x6D7-VBtqUv!VylZeHTm2zcz!fi9JWwCI=6ujE4rqN!<^(^c0SRJMx=`E6t zPRSQ+xWZN^ZvqzW0^Odr+&!}^G`Hv6-h5&)1DAK;1aW^4HXx6ASGRxPixqF$0D}g1 zle3PGM01t?@a+V8-t?DXA0*|O9s_4JkxInJ9+UWY^L!0Xi>9i{UP9&#^fJ2?5lns< z%bm*{Q!50-rp&w?B2TZFe6n8CCiY7&_`jj^IhK?)5Z)#Bk;S`7{Y(HNNg9@Wxfq1jZ3eU2l` zMx&@`|7^1B19_Iyg<97TKwNAn&HHMj(~y?co0vKc8Cr(7B~8#V>K$>WJ7Y-R%C3*< zXP%@}PRest?Wc}6I5`5$q@cn!OHEYw>u%ilDJsH9q?PA_pb9aY9izFGI2I&kzAwkZ zusT5x(E~@Q6cvY=I3XvMtdfVP-Zh|8K}J>5XBrrlNljbge7nJy!U$XWk<}g!O-Idp 
z-(f?wHB}`;D$PKiff`F4WavGXYIsIeL{_^*c(~APeKG_Yc~6&&uxI_52yY)!>tiT@YlKEbNn(g55^>9@6(Z|_)L*%{S2pVKHK@txP`Ho z+17>copiX^T#|BO9Hr;^-F(_51gUiHzV}%&{@?&XIYRq*rhD!gL+l;3w15@6abyi^ zu2OvdCd|Co%?pm$iRh3a3WTnoMyWrLsT!JTXw`gG+i`w5UfZmUpN){7jA?{mkB~S1 z+796a8B@ssZHmAfRlE;-zZpZ5z=dy?`iXuIl~u1$^gR8Vt_D=#`UaZL>_RFPqo8H` zmKjd@g-o}L@>>VG&ituE_|w!hU8b4+Xi^!L1zT_8aJQ7H1!wG&OTV&%o-IH7cAdF! zdx9JS3qvj$ohi=%dA!YjhKfh^M7tM0Zw7=0<^~?g*t@k|HH2N$l=Vw`dkyI=D;UXo zjd6*c`zM160RD0W0{Q{mjn554W_J&T0%fPysFAZd%x(w86{l6;{jXF+tus6A3*!|u3N_#jZHh# z2cp|u{RWK-8oPTBT+PP0mfc|!BvzyBq=ojQL-!$DxkC&vZt2HwrZ z!0OiC+JgZJgRQZqDQ4rTOtGuF!ZqVre^%GX0m5lt79jB2LtK!=#=7?9CHh{)#UxEflX4-PhZ%Tnp8#?6 zxyoGOVdoxu{iAurt9NlI9|@cV^K>D$LT#-P7L2k)5dwQ^1vx^)mRH zr)|%AGxVV+LDO*4F5K#X0P_`R2F@XNjlT2m)~&Ptn6vE9E&)5sPm>Sv-#fV>jm>-C zIrc);n|ChXVAYc$ zLBpRxw)IL2qf^s#vqF@&22D)Tg=Y0L0e!edOQW87k7ZdA3MrG~+o$)*)%YLZ)1mV? zt2$1T?BwyMbA3D16}E4F*rVu~A!$yUGU?{N^Q;e>3o6k6HR)%E%fn?_A@Ryd2z;7aE&yXepa|n0L2*i>|skt{L9*2f?0PWCIF7H4ycZYk@(QQa27i zBe+TX>3k}HslR}X^s`canPOm8vII=3$R4Ya3RaHMI}dzgFP(Y|Ro0C07|}JcEDhPh z*~_sCnS08jo~x%Efke!2CJ7#wmKtUVCeu&bt#Thb$9)B)QF=5~S5^C(aU1MxKj=8! z(|%_h_?uOSHc!fHBam{sGI$ITkM%(i;5A5bN+dIQpCLX_DMT9NhT31T3Rbn9xxK~e z;a2^r`J)K_vSEt7JY-YrbCh>@>e{D_k89IB!==W?XX@RPwb^?^og%v7B0Tr(I#36! 
zn_6!^;Vx*lVqXlA8QVfzw;O-h2L0CLabPndtXNUfY||m8Ax6((DdfkO21iLnXJ>nL z+RY4awH(PUV8XX3U(H7N^5jA8=cB_%oE{|(sX7uaY3!gC1U08AOTa8@CBC|uMr{YZ zSQ4RY3b~g?RR&qzzS=BZfXsKo>OgcHe@uhM9NSMC{+0`FxjeIv-wP|bx*%6Ka9hMr zjTYU^-&={gbTG#fdc}hr5j##IevIw`yC?iyEy-wSpR>DLAd+R#31}IUHknE{&^S#fY1K}27!bF zfW5PF1N8E1QxI6c0qOq=g8(210Mj!dX@Jp;N1Z=>2GUT*;zn40cAFD+9&WL4>_y=SH@GX$}9Yjq4f<8FF-~`}nyg-SS3*h?!4@3|_0r&${ z;NQhk06YZWAOV0W?`K-T{|{R5x3&Ya?cWc?24WBZ*#*9>VFSSo*no7$KVcw%Ciu1j z$e#Dx!jDu4;M)FS|L;hI@8A1d|9{3R07wjgSpcfa?_dQEP+k5)R^R~AGlPf)U{nF{ z=Pzgl;NpF&0-#t2)`KbphARNM1b`(FrhyYwCje3a`~h(qIK{6dE%1PDMG)G6g9NPg z2Q2~@0Ib>vF9Re5(47xj2AUD9Vh1ntkbt$};AJ4`$ag9N(4M@Y8y&n}l!W)UE@uI> zD9{VOtp^$vtojG-17z?6?&06obCd9Xr!lYt$NB!dK%;$ASb)9&@F>1TgM$k^K;ipO z0}c5tY7x9e1Sov}ae!FyEyfAFMHDFf<*dbk!uQ_?8vC1n8nheG=s@@St!6;4`R0=a zZvjXoU@sQ903wxquK}n_g6+@XEkK6?TkF6D@M!S;Xh5xj?)L4ZfeZHS9S*!3P-}pA z@Y@!k*55L1fT{!3`g_V-&;|WAB?Ny*94LTm@FR%g8-L~+bKCzG5(OYg2ioPg6Z*~V z{&%7%SOIRtUk>r7)99al19B=db}*#KQ%;X{ebGdDYug9TT-jh zZC_^A9Js~{ze;5ltnIKI%Y7zN$J`)>FZ$B8QpK+3btO6Gwsgv|v-HQcm@etGrzJ4w zEt2h9FmQSHJpm$~_5D^%9$KmtRD?D-LECFqWur=>!+mOeZF+fK0!fI0CxeqRp*e|P zO~@#vP`K_t9aO`hwUlXF-W;++L+;GDF&baB@=oM7<+95OkJ)Y$T=bI!Q$dZ@k#YL6 z+}SN3Goz4u5FvK)_qt<~>?|I4eGJ7`xas0XiB{?S=9Za2+GXB*$-XXWf$iyS;+Bu_ z?}*7|Q$OyP3T_qn)C!?(m1tfyf4b%y@*myBz$G(fg`a+-@&H0-1$A}Zk|VtTycT_O z+Z67(EViO6hh{SZR3rumMmt{9Zs7w<4DZZA>{))S#O~gRXZ@Ic3`VEtk*zw}$Cv#BCTTiR47<;ll6?xDSJ0_-$aU&UHml_%ntUX+>~)_ zPy~;im(YGs9^rEeYfGLVP6;D!+X=}s&^z1XV(_J}qXWFY;L0}}k;lSJ8V zOlYR1^C{ma%RJbRc8;Z7R3A8D*_8&k>^yf@Vi@1X3K#B(SyE50-J1_4z{sMJ@1#W! 
z9_I9@KAhl#`q*i{ORwK_e~`8d`sJ3JkXf{tpnDHo&j5A!6(hip8KnQT;eQSo`L}T} zSi$+EuhGUo;|2Z|APnHm@_NG(FiZT&{}|1G6AB2N1ZCf1@PlBUrhcsKzB4Dm(?0un z;Rx>-QR1$8=UZzi4@=2bTv6_#4=A2i>j*@_a$*l4k*3P3r9V;V#yBS1bU>x<`Z;aG z-fHB>h?3KjSuu{f_=q`(YNR#G^zdS2>a;{miP$U5qYXEj8%RqF;Lv_XZ?b~3Szo7V z|MesfUDcyfTY_v5|9m0$>Hj%Ka%Auel4Az0$ z_yL)W&8Jo_iLSyPIxY*UrLAIy&4I9NEuvh*;tw_T>Zs`RqPTEEAq6G<`IOT{K5vae zRK1s5l_$H*cr=u)%0~p zKqY?k0lxL!DRtZQiJAqPK3CJ)zEjNmPs zi}lgZHtr4D)eF9M`W%49UvU;dhA&9M{$M7-0;6<1Mo=<*A70*JvZz`VYbr^5q2qoJ ze|3cJP*G$34Zih7l11C*ZtA@?nWdLf^*EH9743Y~MR|*+ryph=ibeIl#^J9OQ8IsGqMq!tp|K_2@TK-t#eKv{l!QBsTF^dQ=tdc5 z-JZ{Qe&X8B9`StMJ&mjS)-@-|yg%K(;t@S4%_PX8J1MP5B+F0;pH$TDG+#^^Rcg|GE1jxRW)xDQV0fd~VB;W$OJ|-0bxL&2k-_|3 z9sk?axdTX<3!kJx;om|?0HOr0^*H`TNcfK-ByjxT(*Ltj_5Tzh0SIvU%YFP$!V7>?@myot@Cz^S z3or0rh8F;W!2Z&E{%lD3W4y?(Y_0$9Y^|(7#ND+n-7mbruXL^du5_*6;-|0k==?c^ z@E2a-7hd4M0WSaqm|trQ{)HF#U&af7)gjjySAO9Ie&Gdv;RXH-F93)zt~G@H!V6pn zFTi%+eP?bY3DU6`X43;2Lxu#WElki=J`D1$)-1@|Yv_fU4JUc{5OIc6oQM%4b~EW4hatzB z7R^?g)U1(dSPV|oEw1J_+G(+qiK8L?lrEbRh8(t+EiBLr>YCGr;}P3O$kqh*>Z3fF z(FTg1AyJ_Xkhck_h~*#1?qRuLd+HZcKk`zG9_pbnLw&>R`q6GrbU{>hrMSS$&*Qeq zQeE^CYOKMx{ChJ8N)rS=h41*~5C_Se=awAXcs%!ECZuEEc`v126Ui6=-zyHA>@cSA3)AaYO+^jzd@L5PW zxB!hS7jSbkvU0KU>H*o>rNvb3C8fpWj2@6MOPiY6Ia@h?}wG1em|w zmEYGOGK*Q6o0~bB*_oI*k?35dg#WHxziJz39Z0ke(yafj?f)C={{|C)4J4fh9sg=3 z03am;H0vN0KA>&-+ZAO0ew6Ff^zUT?fR6c(ApyYO{LVVy6#tVGxK6$QFYY69_ezSw zH?=)TjSmn2z(RGUuy8n^(AVqkF?xB##Tz)CA{0T{Qz(k5^LbhPg^01*|yiVV;e z5K#dfJq|7a)d7d?f(t-(z)_Ci0&oCuycxIvunE91QQ!hlH*oX?xB#3f*sl*NfFKHB ze>S)P8g9SW05CKHnLdAO1Hkz8&HDr14QN6zr3736Q%Sz>22>PmCj)N*Dhhtp2N!^f zg5OlZ1)!qfHy>~Ts2lhV16%-53E){gs2~EI0(fq${1Z2U>zWqO{~B%r@bv!6tn^B5 z0_c)_|Cq8OUNwojrbz{$bo{72RErY1=?mmD-)S@!GHk`^I23y8-h62wcYn;&7{lC% zmp|(nu521EoJzl<4Hg@1zRlz6>hfT;>Vr1xcEbEY0_(%|jnIvHUK?YWb!P49#9mRmS zGFh3$uTo^}!ZZOQR5t@Po8cd=-NV6?g?}THcC>T9>39cj!6)z2j*_Q=kcJK?)3ILc z7b_Vi?}i2>v=4HObP>(3RK(yzx47`o&pq^VxtMv3a^I<d(XQS463ZUD zR}46I#C|V)jt|h^ZW?$@MOQ^@^y(DfBQ@Jd=uwr4wtaxLY#U=nUF1At{G?5a!d;KD 
z$%&Y>vV-l@u8h&EDV!d};JjMpaBV(*2}uL#f;B}9{zg)|k;0mhsQBvQIioFH*`_hq z-SM#_7{8P57@7u#OtN;iUIe}3^qf&rstTl+cn+n6rrY-+dz>UV*-PwjIA_acw&MNK zT^o2?3h;+Sq1|BKBoB%E@^}v=@ZY)f^@%ge8~8kdJEy@ZYUW{!Xv~4($K{nVq36#f zB~f+b4j2ScoW3a{FhaOhj2$kysi6Vh#vZmr2{Ng>f@nY?Wu zR)8WK{`7`koZ^sG2D*B;okOoP$C5`@xV{t1=Kzv!jFIO0XB33V`xes_&E%QuEP=_p zsEC=>@fI|Ax5ze=&EGQy*~SB;7BuI(gQ?-RRde?r{EWH~~D7QOiJrzEv#UX7tMe z5B>Wb;TdQ4l4$&;`gyW6$Y#DQglP9b3HnIUK?(ZQC)ei8a%&qJ4wT01ON)G86_zTI zSk<;$K@CiB4-b6+>2xy$N)R`1_PvmyQkU+EutcZFOCV1lWM5^j z*74FZq6|Ym{PODkNuq$!d9Rr88(D3L*;BrOM&gSUa)}kn#aon4<;`@mhSHj5H#5WI z-0Z^9u*HZJwyFj5V5E#PU$@n0Gn3!%v9x3^7q2~;TXU)tWVO=qf6!hW7Q=t{RkY_? zpY+O;a$XwDRFo=SwfD|WG%w0N($>b;*Jo9Vc*xFMqkP31(I$R)AhJa01|uhH&xx?A zhn0BWhUd-;26y*JPNXJgi4zz-^rmvtOB6Y{cV2{S6y470t%!Z;_eVp~@^ zA}9Nk5zeQ#n#Nwi(h8CYF5+aa$OMRswz?&Z;aFP_H6w1mYun<@D=qguWq|oSg#N`; z_;Fk!Z*o(8oPcct?5tXx;+w>&XebeujC{(mYC7X=i8Jb?DP#bN8hNPJKtHM}b_jPf3yUJ$`uaSZ< z-c$EHX(RJ(DID=Q^bV*luf0a8w=a@YSh1 z+4EK9i!h@vG~EyXl%ct?tv4e3lqpVYgYgBj&!Uzaq0y~c8R>CnN-5{&-h|_fv!^o+ z?>D@HO!kuyFz)RyoUR_821Cv6`ZJHd)=j^vRY1?$-%N;C#P)H3@mHY93jn(Vrg7}x z$rtcU{(TiI0PF&9b2LLVzw%jqO>-X?7w?bHYB?Zp=Mvwyyqz~O(AqqTK1wprHe4RU zRV{6Nz-mR7W+QnQhM>)H1IK@WBwy&2DplmcvpGt(b6s243pk#jhfEz`-*qmpo_a~c zMyA|-lf_b@;WO#eIiO`wx}I3APC?^ShQV7&AFa94@IonH)sCZzPmYp{o}BS{ z)wXtTWLI_(mF8e@M{#=h{y8y(hxkAwmGvy)&3aMsV`a_Z)yKCspU6SIc|M7YM8aG! 
zB+kL-*4SZbcT=~?j>|3T){OD~Hu8oG8#U6MNV+fQ9UPWe{(?!Y#QPYY`dHp5BbvUb)QfkyipvsEl(3##sFpcVhX?=A#)ONsKG{5jSYQdJR)N7Z=YTZt~bA668h@^g85l z`A$Z1vgnt~a?IBGjkpnCjJ$aNE;KYLbSiWVtUhjhi9NP?o-`8?8#ggPwLZA z5opclE>#+xw&{9;v+crWEO{6-Q27!TFP|-28`>Z7Zf^&*BhnihC5P#ub8WS*52Yy( z-K?fz-t6TMan#bYc4_!fYy_+9GPW^Bp{m~iVLg4hP5(~uto(rklIEta-9f);mKQWp zbnu%X%HHWT=_(!%hx|9LWLR^(TfzMjM(WFgiXR+H8g#l@kk&aV#~(8!7gibJSBpzd z*5osd+4eP0!jN!mHLw}g?=o$<5p3_-n_?ab0C_&(GvWB?a(jl>^HaGSoVWV&l=q$^ z^DoOJMw(;U#(cK*- zm%&yiEouL!%mWYH)J{x!kO*^+Cn>9U5)vXiOqgvEhME0$Z+}kwq9}ygf&A!c>z7YR zwx04c`+~2dxOuYgH^4s6GUoQ(mlSgxW!pe7!^3f@CDWuV|U`XOy$nFNS!ATs8D?qm_A|LOI z%@-8iGeYR@JcC;}Bz1GrBs*b+kBe@>D2Q%3Lda8i4Brwv8m+D-tmqnp%OK7#udpp_ zmM*OE#;?fnP_tVyYQ%aF8jqa;C1@u{zO#(CAoMb3$bQ{~A z6BJyNeE23-W`*goV1xh~Oyb8TIbSlvx=B6lmJQiAsI)DJOHNn_ZPuO_dih6hx#IST zvgDTU4TuWy*5t5wI?0md!A3xp8B5~qMp3leyjt+0j6=bPU(PZ?=hH8@D9=&Qs83;T zjSY|rFtG3Gcw@SO4ldeq&-Ho7NDQm>Iq z3AW-luq~@ZRjOKba2Hq75 zR@a2|cW!W$CApT!D_*z6M+j|U{dT(w`MulFiFimW7ef1D?R&yqDGoK(ry5=DW!~AU z3<#=J26v|E^SWr5Y1%RBp}fAkRrdh?d`RwSimUHTaXY(VhDtXIagQ8VCeZGs+4%B3 zpFH!Dm~-*!{z&l;-vYuWIaVPqU`wwm4O(W@j}$gOl&_=u?kvwm)g!Caf(Zu+^&z_{bkp!$IFd zafW^@Smcdi1nmF|u`LlV(eJNE`K&z!UHHSt_B;zr2MdDziU(_7!<`*?tWw_)_xFrg z?<0Tsz(CJ1UdkP`5Mot8BC)f3k2m1eTr#Fae-hhrhF~z}gnfHt-1*Iv08+aRYPZxS z4b;UQlM}=OLMCVY>W^nKEU%&ut)Q3HYGJx-*?38hh11dCx2Da5J3VSXkZKTL3e?&_ z?1+B#xp;Kpbpbg%@9BM22fL=FhmDKlr-z8rZ!}0mgB$TeO9Cotdc6&oHHZ=UW&t@o zu7a!xfnpY%Ar^Px4Wy z>_`7H800J(pmd9&b4+&4$TezG5l7b6)I2d2zZW~*7W>g~Dy!czQ<7RKMcRpPaa}vT z?ya~5HPN<(4C+CpJI4{2>k(j7bs*0~Kf*Irm~^r;S#ZJd zLJF}(d+dPBzuMkZBxbv{)9kXnbBLrTVkdu9d;VZ(`m+IxorU$M_WX;Z`Lx+H!uQ6` zd-;NpgUg91>EUr)6EsNE4H||R_wIYyy@t1sV~keZ!_el)KfU_6IvLvOsue^2;AZht zH8lv@^iIJF0rMg@5l3w`N``UgK_KBt+jKrhqSc5|k&4>bGvkT`MemE1Ww=9KxE% zI@)GN3J$;e#q$p`8+A;j)(>CJ4Tg@2=?sV@#fWk zF^j3uSaKAIG(G&)Zu*4TO2s>hc$@X1@>Pv~P1773&(EegWf@19Z+U@t8BW?fWb>+$ z-7akrzw}YiTLo^zikk*O$nM@%3vmvHFqq_lgl)u+yr;YAO{&{&D!_hMWM4$l=ka-Xdg)G%ld^uiDJns{(wUbi66b~|5mT>-aKhXb)^F{=el;6 
z*(M4teve>?E6fN(P7ejOKWVxpmsx#CX~ZknZdro}?_}#jnE)TFuE9vSjKV{0>8~ZG zA3d8qG7hbU`Xm?~pj*5;>w-(>cEvU?PE~Z$zbFW%pAG2tp_Vh3>Ya?LO}L%G7%sSF zUe&(WG$*q0{zTo=Qv&<+8~3rjY;K}YR_@23DR0okqJHz~zXW{xf!!^S3?S^X4{v-R zG!Hg=rsh)=c_=(LLxX=XXq7`C|FKUxkpA#P3R$>|sG~(~>z_Qh zDJ)bm5$DD(Kse^!ft_V9pEZ&nQXn%{0&fez^gsH(g(AP}+Zr{vk@vEE%U3wZMvtCD z|IEXu@G0C!ry6*VPt_ce1laO532WLTl9^Q+Y)mZNW~*=WnszgiP6l%O@=>PaCH zC^y#o1hddVJ3bXa(kPpzHy@8MN0@!I7<>LjK#+QiYn`~UkJdL74b10YVfpq=c_Ujh5@xY~*kRAYi^!}gWoBhz=?tPpvjIo7F|u$X0iF?{ zvjM%7{P2?h`x*SJ9QM55i1}QA)8qQYd~VQP4md^r=&$DnT?}CTRsQ-P-3H&?2mh1* zfY%5#wx`F;yoBS>3I@l!(a@hi&VX!w7v<%!btX!bU4|oIM4CeZ63yeKRJ-OuW9-IuW=xA|L{Ef2OY@V*ECA=vi@Y0HdB|WTH*m2rR#m^+YQn> zF)?31Z@P4a9PGs2=R)m)jHPIS8TQ&lsege#SposY5qOBI>oPerHD?uIb*4Xf@^LR@ z3zp&-4gDU}pa78rCY-mb8ZBfEK78pNT+2rCw&g`493_e!6HgK+fw-F0R;7a5)b~cB z@%jtRKCs?)dnzt~Tij@;iMfmVyeN~$R4!hy%E_vBfQ0N=BqsunU8^`*E!y+4aO^Z3 zqw0j9f&}bnOWugfpkP_VVpPg!kh;}+-E5-+kLm!67aaj|>+So@A4%WKE2yNric~9|t($~a_mxC@CAFIqq`}+rz zzg#_aO1R-)^JawUL;mX#GOsE$+nuFNRnpWC5Te2o-2pXIzX z(s=rPB#*3OoD9!q$q27wM%<75a?9zoVn2-1uzInVN-mT-3qO+EXy$=^uaBeX06-##OS%in>eIs7JD2x zyu!v#WNn^I%2SxPsKrb;%{?^|F^OMi9-nNF_2LeX?Wzsv;1q0N3@ogKGEi{)PDSDr zGxZ4{GsYP)ruu!b8$YLcJu{s1%4pgSJ?-^=llkYwy5`0@#RhD$%m&6Wnh_$z_u9&p zus1Z-BcALH@V8gKJ;R1zxEWcEXV&A=ToFh9Gp~XVpnRTPys6hreyL_4W@8&Ps0<@AJI5B@z~&)~ znsi5tM@V;knyz8O`M5dV;J&W|$K3sM*r@P9Ru_XDh;&Jtn>~Hpi!UQ5#qSeRySXvp z_zoVOC5-95w}<}p1znF8%@&Ftes6COic_`?mgeblZd_SiBRURB;cB7U((PL^gbac` zWk%_TcwV=ip^P+p;1rb~^}-~s)7UCNWz1GNso(bSl1~!mywUtNqkQ`OM&sL0INF#5 z>MZuB`(^tQ+^_kE`5(S;d_3=~sdp5~{Z#+m^H}{4ylP1sRGiZ_@S;y{uvPg~eofA4 zxUr=p1lt_@foKGBFa2xN&ZBani8m9E)Wwf=u9&m)faXruFvtH3yF~&&?S;r> zc+XN;Kq3F(x%kS(6z%A-zB(b+nq38Vz65gKyD}0t(yhpcuX>*a4ekVsu4z@|7~E(N zEv58_9@~9oEDohAd2Hs4Yt$ehRMacS?CKUX$pDf6ZWw1f5a&cFZ5uK?E@UuFj9aL!)Z4~?4`5D&tbVAs% z4lEq7hBw%~_=FY4*=$lra1dpN)?U*~fY0FQ>t9!?;1PnNtj5;TSezTlJ*JC8V_u+3 zfx!QG&J0t(mNn^#*~~==z3b!Q31hF4^wlnOA0b2zy#bj8=ZsOki=;=(s91%?c2`}x zYg(DvxqtSgsK^b^u)t5P0wXg!te@ZhQmQ;|`O%!~*ZpB8Hc{gkhO9s|LI#do636hG 
z&R@GYdsgy}o!+T>yIK)E*g3*8!MfehLODbMm(eq9AB*+k3CqU~{AY7)NKoNu%UkYD zPc!VW>*8`n*msJGd{=Jy$3EM-73TY8h7oPXn;_f709GSqZf{VU}_;DT6Iv#Jv{l zRFCRbeB&9q!i49z;M&bxG>>ctwUYWfQYY%fCnr{@X6K6geR&Hqy5e3Ae3@H$ij2_P z!L?V>#+x+TZiZi=6;5Z+ayqd2BFVbnvXrK2A(hX>wfH0cATRaI%@6k;Ea#u*r&{uU zc3(3o>BZCDMLN8o8u)T{>T(XFYnfeg)fK#^850P~{?Y6v9wTxSiWC`Yr?Hh-GZ3?j zYR5|LOU3!+OT-FWCcOIuGV5|fBz~f~B09qU?p3*IvliWPuM&-mpOOa#s`s=EZmjdR zV_}W^Nunoo_!A|>$tI|W98`|<8n9qOWPPMFsWolpHow>pp7Y6Y5!_r%n`a3uuen(x zhheT_mZlE(VgM^|uEM58H%!#*^$O;`OU&k@)Z)gTVs8kJTtTR-&hrQJ;&lK0 z@RSdTaAf2H5>0UeL70r3K)52{JpaCq14wEGq&Oq_Tb^^CYnswn|3LqLB2v{(0^~W( z@JDK|f*2A64P$-NkNESbSQ~Hx9}0gV6Q5@a$qwbjeeNA&9;(+QBa+hB`vR$GxoOEy z+dk5~?w#MbV*UG8Y%hw~} z&Bp0wDzntQBacpHgh*yo^RH^)A52}pH89T)Fn%6zh$oPciV+C*D&?3qo5YW z1lMfwkiGT7=xQVDY8nqlc8jwLh&{R}DA`MGLMQ%iTliv^djM zXHkOU;uukG&}Osf6fdeREtEO}FxOw`g=IP8>_}HvkvHoeE!~;G5@;JXFyGZko6c>R ze`i+nw#8&l^&uT97yQNJjvc?s4JC{z_F=C10Mo@o*~5qFUlP{G%h*e){U+;tMx5R- zQ*1JEKORGsn6!1EX{yvK7Ok|-F7AgHf}KuvO743X-sc{YdN%as zbj}tDy6inTUS8d00d-xmGYCPMv^)CJ>x~7i$T^c32j+*C>J>GCeR68VIYTzpnm)Gs zJ(3NMK}!7h!^EGrD8BnV6Q>()Qs(l~E!pOTK9nzl^5bf3cL*QX`z8$cc+DK0BS>wT zRCN5pjj7eB(i&)s_tQaso=ga3=Z3FhHXPVHj(8A`*z7*mQH@C2Av@KSP29IgelMNf zncq0$YGrq)Ga@KZQlqWvHnNRg)r3$xx(^OfeFKUS9)?U!mIZ6nK;o%#{E!yD{aymb zrT!%ZWT(E%RsHpvFPeXNa!0fvG_xQ>T|T{w1YOb|)-9NKu&{l}NJ)8+_i>)WW#~Ct zxHnIZ{B6{QpgcQ*_ot9XzXfrZRyTJ+7!MZS;Kkee zF1(8pOdlN9N13=`&DAA{Om0CIbsbyVOb=it7|HIU(Qv05Zlv?mxZ>;5y5oDRyQj4$ zEUbE}QkQ4*_=%`;Bmh);o}8~Djh&8(?uV7W33I+iQLdahnlHw*+)LUOGxRE{Hg=+IJwt`a>cS#ltErp-X z^gs)SE5mU^?7U-(3r;_WKh(?X%NA=gCgJ4gAGW{w|JZx0pgOm1Z7{gIySpvi3GNWw z-61#xcXxLW8X#zJcXxLW?(Q^u|L5=8*_}R>i>~fE)!!YpZkX#`^PO{yXFQ^5b4wsr z*1j7bg;#BwnlYM0Mp*SA7e(jS!MrV^z!0}JdbBZnXPjz9AHHif+pbAlAqmBZJxAVM z2EutP;&9F!UQ)zKw3T{JEPSZ3E7wS7fny2QuKRU=Rt>A2atwf%aZ$c=sy=WcJoJ^` z5ELg2Lh{f9tpeT18mbXY_`H;zg4-wvQw#kE39s(+!^LX$jr)s+HxMGkAn5P5z#lgI z?EfOBDoa$qmz3|#z7{_{W4Eggbz<++-FhK^T;_311ZG-c8cBUrKDI1YbahLEW3Y8n0`T<*I=ZzUPe70a;)=tZH_&CkI 
z9irP}8u5+Srl>^Sj>Q-q_V75&PwjzCGwC)QUXFZ85r4GK93DgBgMyfw6ad;>X9G#$ z#~!n^ogIh2IjG2+Q0(!GQk_{~taC_fJ&^EoQ^a@dO;IhQYqwsr@0_j*RJw~vp6)Jg z69!%L_ide(IXXG%P2D)TwPrl`hz)Dgk+I@GuDiPZ>^fL}wZSQK9M)- zOjcZTOE?t8F41C<_P!7AL8DRBt^RggkvivC()n`4IC$5={JgnqAr3dSw^dPQITSsf z04EV=bB1JssJ#f8wL0{JENe=sJ}@C&X?wP0_^@|Mwqk9CF>KF(>)Q?!-{;if=Qj73 z*4OX;sompF7vIRvg01E(Y+4R;FEe0wH*}(=gf={ILy2@8X+t_%OCuWj5VGm@JKS(_(osQ=A7N zZB;6`>475V5;wS^Gc6awxJl^<=dDr4+hEd90H{N5tZ)bF_4r7YRM&J#xqVEw>}ZJU zs%9rZav3!Il#qo+5;zsyD%e!B0Q#+01;Y%fxxjY;dDPNJV^zaYC&w^YsEI&^klXx_ zpz|!}5cQ`hWAei)c{bel%w;CwswGFtR+^UDL?41Cl*@}s)lI=P5+J4@6oyhbBH0s^ z%H`I08pz9Pbr+42DFlt=fIJ~?)MH`Pjd<1jmmkOnh8eV{PA>c$XRH95Uab645I1p& zP(*2fnk~kxSn5o;Lg8eV>W2RD1bEx6*Z#w2jf^c;&4+ueW_nq!Uxt?;)cg#ggo|y zrr$_>pHs7iDYUG#M|RvwC6obR!26g9sk?xcd_!}=8$5vM?tT50^);BD@oR7knY&Eo zzN0*1H4MCcl%wFrjx%NktWYkWUGff1BY~Sn32llbhoUQQ;sl zlUt7kC_+V8;hdfKQWWA@Kf@*5P=dN8UWjetZ63{JPu?Kkgl+?PQXAc2q+P_w`OHjU zr&ndEv{F8q6m~c&bdbRms?T;98#Am{%Kqd?O1XZk<^-dzhGx|Ev4u(s#^sLt_DNhj z`72EBNmK^mtlkzDJ>y}k1%9DV8X7xtOS+7IM;lUEjN*YB=z2RyXV+##5>Q>W*#-RU z7xJQ624z>^5UWZb@pYP2$h~q)fcXg|a1yxk^^scU)*`17F9-4ZVu(RHK)zOnH78R< ziW_eA))OFcQ)gUy;pjK`EqxzXtL$QF*AyToKwt_eCxmViLr5W27D8tu)G>Z(NdSDT z&fH%#@wpdp{^AgJA9>)cTsw?yWZ|;x5==#)tAOPSVOyQmzJ4^!xx!-t9!--I>S~TG z4hRhs>Qohs-&ukF>QO8(8D``QST$M^yy%V`EHvGP^rZ@6{~E1wQeQlbAe-nZtTs}W z^eLrlhL$UED)Fm)*6S7_HCaB*mfA%|;5B@-O~AA#rRpQK{B^`nGB+E^)^1>_X0HHi zh?RhYuN11EwO!TD@SpFZ1GJc)@Jo{ttxq)sXoGhW$?YFdlGUaU=#$RuJ^L#z9G-!` zP$07WZb$xM-^>B{B{fM#nX~$RM~=MfC-@Z&g0Fazhdb|H6fw+Pvmnj+`KLeR5=--& zU{YfHC}Np)F?jgd>ArdjdJKQm+zlnGAB$@xpjxc4rn(kkHk_E?qG^ZD7GpdTz6WQf z3R(Po)5(ABLBNu)yg2OjUj;Abh*k!)1Yy~uCn2yF5okH6*k$81?!e+ zyT;f&2eIA_Ydp!N#)0oJONmgq<^COnwUG+8*7*=*w|T%{!SEEZu(IEdYqp`AMg8Dt z!2-rhLTe{qDwxOf$j^X{zLnP?3rbO_#tPmW8GD4G?f^!faZ7pTLOt;jf;J;?9Ouge z&vq~~zAx(?a8;26~kmM(IEm_2x50) zH^V-0drZUkTRKAzGBR-yUBO~vJEri$5YT@~3&h2F<_TyzV29mf z2xmu~FX&(4iU@`!32`p{;S}FTj;lK&y3?ttHg3FyMAp!pt}1h*Vw!)S$&|cVTr?dy zEZtl_-TsEQVw0+18!m8zQQ)%e7LHce+rxnio4IUff#HZo*Bwq<1=1f{qs~eF0W4!? 
zA5CWHL(xExshRy}Gj_+G5{2;{qP7~%tU8ds#)FghyCE7UL#Yg723=8>SQzvGL$_vH zbdVPi%XQTP(w`@jNr-CWd|7w-Jr$3ai~PI#0PwB`O}+`zD862>3yXjg0_HJP9#jUa zoPnL+N!^hFH;&xBHj<)GK3hnVVVA48l(>)Y-9D%pue~Hv;MhJc6WA+Gnh@?hv{Je6 za<2T1aJXz0P*0{?t>uKegc&eNach7FNI^Gt?krmC z$|-zCZbtG$vrf+O8yZI2Z7?1Q^!(o~>ksiB4$fb0P;C{dxMND#zlKsD@b>*l$F@U5 z7h9feC`b_-bjyq)_)y%`;xI_Eb0I((X@zXj;r63e?a$l>NE>$D zA>9#-?#>8Q`hSpGpB><0IpL>l9Q1h;6A`PC@ejFM#)f*L?){FPTO zU;I`}=y6H~rTdu17VMVqjh8P@J@_LHtciA|Ndo7OB%eI7R@Su9FD9=frR{#ny*RO&mi*)_+5aH2)lbZ4mTViaK}oq2 z&|37CQsDW8N<{ChNi4IRwLbydWhh{`T;Cvsj9wS}D^PhW`skHyu-|AyHc1MAFOBtk z-t4BGvx_`>=hHxboG9&p{Y-lI0d6z(gWD0AvjL6nRi5sl+K;>;##-QheaUvrg>jSE zSVjx|C};s9M%D4C&tz}`9Abj_HN`~Ps-V-SXyRx+FEPoo%OM!I?x7iJg z4Rb%c(i7vY+qsY~hMW$M%U@t@bIm_MtXkrv&>_z)Np_{pmfsrxk~nH6e8#*VF95vN zPg?Cq!Az_&LMz666IA5OgVP)FGS-Qpi zO({iZ=a^iPV>KDN`&h&_qQ$q#oGwi0>gxl;7DtW4!&Hvpv#dQ|R#kAEP23UejZSS_ zWVYno6?%n^UBc@fZMI^&Vi7HR$rPNo9rxI0v*28scEs)W@izeJ&(`0)#Qy*-|NFD$ zUufYVk|jH2OFN}Fe?<@c#Rx+(!uE3o{RGo3oCKl3bPXb!KJXY6NH3giE>I_dhYK~V zQlK|Jx%%!MQdUK(dO4hkcWNpU_#-HsuYg0>9;0>?tiOxjCl6-kqZQMBNDc^C@__N} zvAqmD?x_yuAq9UTR2xS*!p@j{)y*r=16SQyonr`ps34@%yLqPSm0@Ypd+TY>c5G6c zS?)#Ig^^km`G=oeVqy^Z(Gf37po1>4M&aqA|#N@ni$KwG#;9JwAlpBa;+iK3<`3#KQ`#F6HdjvT%2!3V!M&p|Dbd zZjTi_cU5pqU8fKk8xhHHB6AjGb?B^FOkWUb>`!aTU}ukuPuDgEutSy<8y+g$!<+Y{ zxhXH%p@RzzqH6^h7(sO_}dXLUrUsy?MP8#pOKgzxb%5xyb} zY+RU-5uF~3$klK%3t>7dcQ{?KD-Jrl=)yX>6jb`+KfjeWuve(VRqq`+nm~fjfe}fA zZ}+%ZZh=@<2t&qYZZVj{k?e+Gn-PR$5#Du(VLlw{TsqgF^A#}z%m}pd)+fgnB*L*k z@OzK3M8R3K0hGM@Y;9>*>-MzQ5gGFF#$0`UKIp}#-MFY7t~yNR2U;Lb!{K?8Q;jMC zu2mbzOrjrPX4XpH*;7V3-b>=$RwB%1%PgDCRv3-EUEE(yE=sKDI6+#^+We-jS8F1$S zO%F(d8yIkXRiaK`@lHqfkanWud-PXX{e~>OFZzXukqqFu8J6b!C}+u2$V}}?e@dryzJ?+0veA-HN^HY! 
zUcCw*x8QxL=1#B|Ty1KTaTKO44f9?%5!DBlPJRXRo8?UdidJzd)#K_;neWYJt#oYQ!Bv8{B(rB;;ZtCcr!J#fTUhERu}wtsD+jE@jSPuI73Oz*K|k4c5CU7Uim& z8XN~+?aaUszRr$j;pLO)rsG=*(m9$OA21nEDVjfO`%!L?=8_0$>TsQPFUYX)Iuj!H zBmI@|axpTh>ofKdoGyt}@|_xj_->`XGc8Hp*%1O+S^n zliGJ!jSvfz&G#z*sxbWeb(#HsI$wQ|iIAwM1X&TxYC~>nh!n`}>j!B~j(uiA%<9q- zKc59{<=%Levta+6s(Pb{J5f`Ca6FSaT8U&YIX{BtpX{t6OTwl|n%GzL4G8_?pkQy1 z#rEx0%TYCL!}B*>dxEA|l@~K^*TFL)@Ua`GN4J|%*18A!a|mlB#D@!g(MqngX+6wQ ztfVStLc3VUC!n~VE(6oocb;RH^wc>r0SC+*KuzFX-#&d`DaMHNs}&rA5#j7B>*$ume+aWOGXAyiDhkOTLppsEY4%^BOACSLZuPAuL9_?l+^xS7JF26Lm%jIALaM}DdBG* z{{7Yg4?Q~TW!JM_w^%u+meAM+GriT`3+V)*Xwv&Q)pHo5GKi}C|GIJ}z#q~@SbudS z6nT%^#wGe&z3BseGVH#znIiP`rW&kfAxlaaN?MB;3?L?TYnCRJ~C4#hbh5A5DbdcdX<1YPopl0z0C-?`s8K^lInegiy2aCPcB5! z<5}^~FqD{Y%j|5mwqCj$UE4?jpR+d(Av}XP2%fbuH@P)ln@?Fh`fnFGFr&qvNG}u9 zm9DCG7uAfU0v|tr^3*L9o0U*-!c`q0ix z8arJyJzqEM~Ga7gpyYt7&>JjpM!2;wz6G&mCu#LoEOB6TB|@&9UW}T%eauFaB)+I~u0fNH{jB_!x7PEM2~n0Z*2{C8y#F)5JANaw z@I4hmfOc~B1@mjvTWJ%)Z`>Q10Dp>5vj2hyDJu<^tpH$KnxWYU{k>yvI3Vp8uD#*J zC61E24f;{ryJN+LKlozm$H+n$i0B3AIwDzg$YI>O*EU}45u*nX#ZyaLJ3IS!BEg}A zWat4nc`%TA>4+^8b)lgl5SQLnHo&3_c*AM(tUGKj<-$P%?8=g4LHw%+Se+z?XN|b7 z^QNGNpS^*=Sewhz{PyCsef2o}DwTg`03e4$i@FKt>4(p#9KHxL+LX!a+a@7!s+gqy z01+ShY^Iqm%Br&l5Yo5vIu36%V&NW^CMkrh2 z%lRk;%YGMEt1GdbUy9MZbe&?RFJ@a^<`)XVs~Y1p-$WOB^}=?PC_AukGj)jFIz0LV zw`6eM0vrnMh%9cl`m)tEPD7Cl58wCRy%)2{oVmd5*IJ2#wVs2Cr@$? 
zjJXx1f_JaS=7VAxKtwlu5}&4tZJe0;dX&ng#dPCy!NLrh8~yy-IIR&KxRqG?z5$Vi z9d=#Y$laFG-9uUtZH-HwPSi-PMI3YVLQ~ut(lry%rRDY+x(nNuVF66d&XR};MUR*!qo{jnzM!{YA;dWdS=P*eDOa3mg z(yQiHhA~^Wq>Mi7N6X5%7ZWzme1=peqQOT4LX9q&&(PlMtVZJF<;4IsbWY%-zPaXk zi~I_y$l5fr1XrDp68?80p5-6s&yEOGQDN17cQXDVCdtD13ztVmaoCRReR@g*jXaTX zFT^um`?H(l?Fe0*`BsAN;sl&1yaTu<;2k6v@2T8?3D)q<7D%hyz&CCn@Y!{h?-}r^w-^r)%1(%1!w2ukS^BcC%qdEs#lZvjl5&}!PFvU zSEH;5x4H_bb4rYI=VxoX*QcxOMoqt+Yxy{5#e7bM6|Y4|d* zr_Y{!cY1lu-?RH}CJ&kU_L!G++uc2)y|+Jx%htL6?ic<;EbL#mwoKIMU*&}8nRF*G zIdRf^H8BnHS2gjO^CI9(jJP-@R+v$q7nG`$f!rRXi!&zA7i273&~Fv=XEz!tEb>tQ*~RH4mbu38OFiit^5z+ zvVWU^MRLM-yw@AP3&3;P2Pwg#;Q8HJ z{}2^mWBirXsPw=2=34ylWFI?Sb^3c~OU~L)J`9$trgw?+$4UC+Pm%(redsAFu`lvJ zHZ0+WN510Y3|_x;mCZU%(2+I?3G+_u!&gOp$l#>gBE9H8xW4;C^{}aZ`^` zO%DT|`RshbDf9e>ij0>9W;yu1J$o2U8}-IK$rC5PK3247FH^=B2klcgb;Y+k?Nvxa z_Rc|^&b|@kPl(I}Cd&Zj-id|Xx<9&H=ZA%BDjzGSw?<-uvzl73ruY6Vryq1 z{wW7O$*#Za(ZYvws--h|0SwaOs!w8akShQ}tDrvATw*6{-q5uaO^(Mvst>W^V}zQj zaxt`cC-Xl0Uc$s}4#Nj4!|H!*1b#4~`D76Py-q?Sp{fp|>L9(X0yP^4IgBSGt?Nh} z&h{Yv_y<&m6BjQ3Wj6{8Mhm4}2hx2<0(@HE-nN5&3GObqNedSFlP12+Q;I|ThomH< z`MhADJ}y^;x4%F z;3-Qx^|z6iP!=6<(h z|FE(C_uKXVfyEvZ;2)jn|7K$TL$|^DE{%E@+Hf%c7weDtZx!KRE=K?7M~s~Bj*ow_ z2>)-mm`s2_Y`T~M|M-XvQ@eQ;WKd?>M*#)t9)q^)qOf`E5PGrF*qYyF_ zupgAPg2RBGUI0eq0;J;N#$Jb)S2Xc+w+NLu4YQ&?G)4XeG7?+LcbqutHQ6~WnWu`_mXKjY_*RF%?mbM}~!IZCjC zKLeIzGIt@Y2=m}K;o{}+rIqYkyZq8M(^Bs`^{Ok|rXuZ%=-AN;B#Q~|@xRZ(TczaA zjDXfP)Jn@Xx$jn|m#rZ06e$-%gze`#NIc2Yef^2$s$Q6Hg)#f>c``R@RiJ}3w6469 zb>uCG7$|BH%#A9S`2u{N2kjDH%FBXu6%Op<5c!X5Bldv0t2O=2r$yM#A=KFHQM)#} zd)jr@w*L8hFnAz8>fi0fKkO_yewmBNL~z5u6Uzjhv;AO%nI0MrJxfab9o|^&(^OLV z&wXZq!A^t?tiUaX<+KtGke7YwX}(Z3zH)J|b0mv}{k)keLf}~Y=*hj_ptj2N;Wz^u z_BShI{KL+Xk?GfFJc=9s-BnE3UA7a9)^()+)-#tf@b1;Br>|3!5%wJOnz4b&!`?TI z8CSoBU*_b6Rw+R^z);c;O~C4^+hJJD09{20n`q(}^rv3gpUEiZ z5v3kn(7) zr@^dt4Fq2(-T)X_zKie+$d>SX3v_Ma&_MINS_jrWURQLrJRnZr5|?RW)*sLo_LH5B_*7%9BeV{?`bRqdh>N}^_=Uyxy>8jzE!^mE zwaJ#$12=c}d^U#usm04`U9)oI5`1%fKxne&rR~=%XrMBbtftQ_kF%ZjcM`(A<5khj 
zN7!Eo&B=%qQLX~=%Z3s>xpC)1RZ$=+M3H(T&kwbcb5G0@Wxg#F+JLx zOJF9{ef^?9auceEo1j8dMoWsn6JI3F{o{r#Ok8UpGmFrr*&|sW8*;Nm&i8cb zv%~J9O|Y!K^x=iL_N(JoS`-E8!D{7eaOD|jf5!@ui4l%vPW{?Ozr0u zQ<{SzUR9Urju|&muGW;+L-67mA9WrOa>VaqDgMquww&;ni))`C&7G;U^_Wk%i3A&8 zo{eNU_Fk}SNCSQz@6Rb#4oy$ZJ+-v*X0v(~m6#mg9TC8#%KomxX8gkno9WlFu8e}j zUsuO_-o>L2oP1c_owWQl;9U-J9w+_jD_kt`0l#Djl+KJ@DhjB8w60mF>?-i}GHTVP zdv{F!9XouS^DX;uRp5ccO=&%SLtWJ-oZK0T95NLlZ5J3790SZ6XD*j##CBiZgA$s@z>}!Gl z4r|0~Z?Mvog~?Y{#bp0=80OaH?@zC%OP|sqD6F+w$XUMGmwcWb+MWK+1@`!7^T#X_;erm^{hPQeP&2S~Z4A%HSMi*y;CXJu!NWm?L zj!56|9HR4Mb~XBpb;NqwCM~tv;fE|Lv@;orTANc!mq5yBfS+b8f0V;MAr6OiK601Gm;b)}r9#2ZcG!UZrq1 zq;iR-P%D@$zLs1u4v8pfzlP&V#?iUdz;C4{<+o5b98C+;9}Ft3qYgqO{K{fkZIRW{sk;qqo&@)}i1Blb zeMzls{p1`DwxOokE^^X1MQ7xRIvnmQqF`~yAx+hwLbph%#)pl&2oV~eg={(!FUUm( zo*-LOP)f3OgNfkvF_xDiN7dpNE^qI?v$__+Y|CY(-|s!%Jt9QBX=VIw!~bDt$;|u@ z>5NN;jKZKDC?n+aUkD9qaKn~*@^JK*S6vM8>X)?L{Xz%!&WPtTjw#=hy)a_)MQA?7T)sKp`X3juK5=QA1+V1~h;x z1Zut&1IJT3N6!L?8@afB6J)*-CPqu;{w%`=*^$iowFslSa;vjxi|4#4WNBx2>>0g zs7bc<$J+0=K)&4%z&h$m0Au!BZc1jN88PA%n%dGBw>VQ>8h%zKb`0EFPjL|lL#uY% z$zO*TJL)T~!|!^svP!?3z!RgJYqSGDcA&Y&Ht;xawzDdez4AI!_kt4??PyYA zPBEE9+6604JF2F*2&Ai|>V3uisC=6E8`%NV-#HybMSJvAh7=N`%zNl|cwm93niixz77!HD%N-yB!bpkf+MZjB@!ppmioK_?#?lB({ zhZgNMrkNXcz6dRe!2e89$$_wvrM3b_fJnPQ-^&A*`8%8pV%w*~qooKwCKHLrNxU_` zzVCz{Tk1$2*?K1k6g=docBgb0prz>FNiZR3qmMK*^<5WtlykmS`k7lGeo^~sSG{-1 zc!(3*VNw}~*@h6GVW3h1LMGFUtWpTX*+or%&qoSXD(J%nBbXH5_b@hQJ50*Q2PW=R z>Zh3rHp7V`b^Wu(j;Krmq%gzrNpnFAUT^OdVer|DK;WaHCTXH6rGYC9sWC)>g#=z( zv}mRydNFrOpM)n!e`@r#r(j_-Z_gy@XR*qtLK6xNB3W6{f9I`X)`jB{09YZb=>-jK^47~f}B_DY>=XfKmDs3N|V$*X&nx_*iD<_@i&J&$9j_m%F6+{g$ z$&__S!Rxu$2&w2jpU%S*>Fqgt^x0kHRZI%Poq1cs;L`{zQ5Nlkr^Dy*&@XAU&t3aG zc<}A7VvyUVJa5cxY5J+jQqxej)cV! 
z=AX8KKTL$-bpCExe~39Sar_H&kh1S3gk5%k)+LN+$IPb4{9j6F%K>*5n#HI9TEa~z z$b3kQVmLx*Fwy&5(U6wr_Oi!PmlMIvH8t>?SOUaIN=n-*!_E)H{&fzL`jv2?XD~I_YKC-~X?oabz{H;CB!D5AlU}gy^3e zKqmTqa{XU%5{RL-zAY;$US zmW$p7_CIr%Od^&VL8^<^b3{VB4ZM={k9w*baMv%vIBa-1e)Gkc{t%w{r>yq9Hi-N$ zMPAmsB9FFTcHqAhd8q8yWCx^(Wi+U;EN-w9W^%ZwM|lf${!wgnG1V#z8JAJ*fMT;v z9hF$*49(Idu?KGP#+?KR5?XAF->vNrkq7o)<$~|ceL3yBo$TJbovhTzyDj*;*U8;k zyOr;W$Ds^b7NHMhTnH*6BnCS1T#$o}l}F~q&32;$`@t7r*W!O1};4+7-1-Ep?~i+uhi zsbSY|{A#AGwd=Fjx_tdriSH?kF2UWo^er-h73QjQDuVb0+giFLyNOTTIFp+x}Xxj zaOOF&2k(Zb0T~iM^Mta#$tP!oo8vFK>bSSa^im9_owQF35p6xI=Mj;AG zuAQSb=P@F#4Q-i~%on zF2#`44GVen%8+F~Mfz?w+T%}r2=7CdTSU)je3^xer5!R*s5gN#fq40$%)~%IQfG;@ zSALzCB;>nliej|Ch2)K=pRU@R@>LJBXHrIN{zi3TO@EBBZi_SpAQmLhqPiFq?&wqR zoqUUtK4_}Gkvj=Ahbgk{U9zj2>7E;KK^uu3`vjT>Riv+3-0@3i(;&sUIgbBC(my-|M(m$CdpJ$-lD|6A(%w^03E>YC8a z7<}-ii)VIlDIqQhMH(6c3*?Kc9}^1l6%5ct(6rV)PIkQfs?CN{C-L&=+>~|G@S=tC zIfck0YIIg-TBTc4IKW44+sPRN6WI<2(Y_P0Yk8v)@}L1;rLp$ieH;wD;3R#@M_}t* zO9Yg}QxIIpO>Fp}D8?w0mq)e1XIxPpG6Aj!l74O2A+_J#0+XGUd`BaXf7JRY&NMg7 zes4(8;(;>6J3boCA0^-kjf@AwbW}QBZ_hjpgnN})em(g+2SW9gLCIQ3n*(v+mHI6~#%7>ts&OI5;akPPD_&s)&UC}H$GRLfe16@X>Fq<7>uCn5x6S*Y= z9OF^*0?T!Iab5GaDJgCE+MihZnpqVdAcn!}#Lk;C7u>MpHx9-VX+h&`W&NgUQP7J# z4F`RaS3zqx<~GN2Nq&I$>)C!VluQAYdb);C!c6Nd89ztX@`fwC0{EoH zh&?9WjPE!-p(QU&0;ZZ`c6U;NcR3M)Hk=&;$7;x6By>r(8i-V7JgJ>xN+(KzMTFfL zy?#eJX32vEVthJvi+h%RWxH}WALQx|X`-;nWZ?vz6i{zp@&2($Co9}K=jZI4!dL}o;fTXfRVa#YbP zTgnXaqnJRAW`;-Qr2X|7(&D_^yaD4LruD+ayO?+y!IyTU9Z+QemTO^74Qu_k)>oh` z4A!FGZRbD4%~&~q;lKQ~op%6`E$=z8{P0!X?kkLp!C7y*6#Q|e9$gxQ%n||9P~@cD z{28DeMZhxmol@F*gecF=u!ez!cIl~Pu&AI?{=RAFT4V3jX%bjR2zFlql(TOl=a85^ z9yC0vgq?)bI5cRW2ZMts+@bt(FVLM1toc#iG<|#{fB_ID6Vqbd2u&j6HE0m4#34=` zfv;(`1aScA$2b%hrNc$jOhgxbY6~uQ>JjcE7)lYdE(Zw(8;uuex+LK6U{_!qbXWtI za^l()c8NFGwf7)mDxfP zFdsV+FWgU5yK5UAjmWG;`l@eUWOw7aQ!~P>tEjEjEC9L7?h_;t_kdxOTNy5-wt&vT zAKYhRTp3oSB&QZRHkM*<+e^l?VOd9N*TZlYpjYh)aC7@ zhUxgj*od^=!>Sz7qQ(0Qpt|$Glr{5Maiy;7oCDkEt3Gzy@@_#~Uep#jzJc;+rfy74 
z(XDrx?Dg1t=g;9&DQ#nyv1>3N>5gTt?q@C~tf*^@&U6*;pS8)xe45-@_5> zGd(N5_+`Otd;roNgI!|@gK(M4u+TsWCTvtAg=?w266T8@jquT?!C01umE>)c|Ae#3STb_ZnLNhY zSrQvrm5UXFhp6zW>;OHn?^P7zg zp=BIPXBRNhHjjA>V=JbuJviLsX5lf*{&b=6YvvoqRUJ)MIm;|pdg^qzh?6ey7K=7F z@Zyw!SUe*$ii{F3mSFA_T^oNghN{yHFiC3IWI>7?6gi{^G8wDpx_gC)nhrDWXkU~0 zJY4!y91x*}4{U}85LB*IF$}5urx+{GZ~?j@S){qYq1s{RE;(bs4GcNg8C z!eVT{k{gtz;>g~u{QnAkLAwmTY6%ThcU<@8_RU?jxtai$A(TM+(iPx{84~}2LKe~f z-v1M1X9pJO`uNg%c!drk!Sun;M=YbU`4L*6#N-0-Vo+hoY>L?7cvMP*Uc!J~npCQe z7CMOW2kOe<(sS30bLE)S3O^`1my)NqE6Sm!ka-P{woYjv!FvPG4_{rkKO#IGd&)BC zit1^|({|v$BxXSSkD-Z>QiOfW12~0&qb{LS1KX(tfD)2<@FuJ@z6w{4@C8DSuMMRI zY+i*}Ccqj^O(HjzVDQu1luJ_0mmpeb%zvFDUz0ZiPC^m?W-;j4V&<2=ruD3R{tf%< zaTA2SVTU>%a-fUo*!N`*!@OgE0^;-At4E`8b6oX863 z>GDyPZ~T;DYT!9LP{6VA%=eEG^CT}7Kdc8U@Xr=sZLg3}DobNFxaxD^=zS_DG|S>0 zNz_)!gZ!2gx;B|2C$r{!%9I2uF2r!TgY`sYCmcn}{SzrO>w~_)R*87a;nlM{r5VDUbGoL}5bAq6LL^dLkPOz53d74z zLAXCvG^~|0DiG}v4i(*IzBY!{N$hMO#}&0!ynY)K(%vmLr5BTGO*i+BRgj;CtDP@n z7K9uI%!UpjnRdjEBei;yBbr0X>QB_JvTJPDE{ z6n*;a8wCXBD@T|weqL6OX73VWyDBM$^d=aQ-BAlm5Axj3&3U51;1_j{}3c*{1UBes93x~)^L@DO>=E_vIZG&Bw z1K?dyKfZ_e=t>Ia2f`%WOCn8<`AE@6X`-<{Oxms7i)xg+OJ-hn66`-rm(n`elj-EY5f zOk?^xbo)>4u>U`WQZoIWKK=(!h51*124w{q`_sSY9@-zk-+=cd_GB=0&)Pi=QPNo6 z?Zq^!1IweL4x=QwaC6}keHp0pfo=!ysfa!Lvm-+1LzC)i=h6@hP4tdZ5ya18D|}1O z)kX=a^)e$@Jo)>~6opY&TCr-xUX%w|?Xu+RT8;^~$SK^ybta3{FWH26YnDwe$1U&J`ble;YNivPT4w~sgsDXxEp z%EDW@Y6y#Bmfp!T?OTL$@a4Um5Q|IkALVc9W%f1pC9RjK0#%U2cbH&`S*q*dw_Y>^ zbd6P>xM4j}tp~c~#NBs|hpN`xk(4MLlf?lsg=;CU{{Wpz7wc&KxNlpTL8${o;8V$DoG`?#(yoUvB3d^xK$lKRsc(Qj|>My5(YG!|4&2q4x)6wT;iR`s*)1cr| zWx7z_vIn!%8d%i5Hm(2gV?+$Piehx@V-tZ;`K9)2;@cE_wa9O3hrdJP|KK-dWco!6 zC=(?G4Z;X3bjKG+uXj!n#e7Fq0JHbHyd?|$qq;IbSjj^mN2n8<@9P#+r_>4Fp-O!7whdKfkbQ1Ip>l~A%rCbHbEZh*9gYd^hdTL|`Q z%46X1g4UfiO^Ufdc26Lv4$6z)eTRSGslTBW|346p%EbJK7}tNoQU4Q;`ad-sm5KQe z0kd~r{y)d7B8r2N?*z!FW@y$8{^Tz+%;CK}OfTLTQwxh8(o{sn-9*U1pbCBx0kEmC zkRMqB;7n-!J_!Z5NY1a#UW5Iyw=+ehDR=%vtL^7-4LY1v}5e8*3zLrysk81tgj 
zgG$MAp>u)LaSaWTD;_FHE$+A=Mad&%K!i_e3?sc2MQ&amLxw)hlLyxdQsd9kbl$?IlTr`@%+u34arOT4W~ zl&EQ6V{n7tAY#urGNb6K(GkJh2^)wN*1I)3oQdC1bPLCL)UETx3w**^gu*-FvF!So zbpRV@btuoRV57+HIfq^Yl8N|Pe?$hro-O4e$e(DM(LKR_@rtpLf z)zPwzUvuo9sY~SDJ)SpAV~ObR&cQ##f!O}#v7jVn|5vd4I}N*o{^Gq4oKZp_e5qFy z9B^_{vxjLi=dbYrlCUL)2gW_+2WdM`F_naBr%~|B4vT=;*a%Ne1(qdnq$0%1uy&>L zfKO-`8JXFDt&?I>`~U(IU@JO=Hj>XzYFjA2Iq=;U+_X^Z-S;j+9^i`tp}4)^;I&Md zV{1(q6(V|g{^S@py%EJ1Z0>Ztll3$Kt?*bkIhQCAC(G$bd})hK4s#I*s8lq-QGVVvDzW!6g=Z6j9++8ypuMA zXA7Wv(I{Qq)y12jhf>F_VsWZ?hf(QRrgKSc6^@bgf5vGQ`5c$^D21kn5U$0eC|rVu zz;jHxjLNJm$`x)WG)iO*D8ZF%IAE^dra_?RX|9O(v$DKYms%zBH1gI3R8WH$|9~tX zi)(WY5{(`%3*mREwJ$}_+=?!bUA8OmBt7LTt7Ka`qQF@Os@RjRb#;^^2;V5BRkfre z_pL>I4`6-juuwW*B42yWNbe|Qe37*i)qvSiGfc%)bYH)6B)7T38OHaYsY=pJiWs_X zi>27~PpX}sdzz*^$?(gr}?FGT|3A0 zrzgRCKCMvg`xAJNHgj;aH`cfM>#x5G4W{>e@;`zr|C7P|pA6>zp&86f%zp^U{i|M7 zexHewazeKJAML$$T$Rh$H*C-XQW9d&0usA}?hYwI0j0Ye6qOQ1Km;iX1q3Ao6j49~ zL9j?krAtCkKqOR@cdo6T-`RWnJdgML-p}Xr{&90YXS`?csy9;ai`1Zk+ z-sOLoy}#dDpbNocrNfD5mfKw{p5OQ)w*8r=C;F1jel_EV&w>P#K7WtDeLN_<-E65d zBb^!cc5i71(#ZUQh~+mPorT%>ioC&?kKNxKnM4E6D}3#>slAp?$6K6;I#xQ4RWz(+ zWx<}O<=5dpCpNZ&R!;xD+dHpiDjo*ww6^zxd%AW!qd&aDf_8o7{AOCY@cGWSK`iuh zVZs7nogykQci$l)*IQnXZ@-ZU+#A`X6qWl`Nv`c%mhX7anZ)_Q&*9a_49K)w(xzhiRD9_j?pBt&|l{KWcr3h3$GYeeu~3lrW}k$Jc~pxGCK5&6SmqCdEu+ zQ-gMxS-uf-8NE_fa2$8$MCC@|A%D|$nBT;st&Rhs+48%oKbU-VzQS5(ZhZa8Ej7+p zkLY%`C)*|+fh`K)_c~fjU)WMhxVS5^62R&QtPCw|_QlF!_>-I~t z7jPZV@s+g9{U_)y#X0#_2{+tBo#^v8<$CHagG`4Pb<44<{#9>67cUQt--7wSwq6kx z=aISe=~9Z|fS6OBWZaW-nKkPT?LnguH@fOQA`>>!J35popIIKy`LTXGKkMzG3B5xT z7Q1GeA2hi|y?b@_bc*%XJgu|eN)J9$wJDV2(8(LxIUm=-fkY)WWjYMIS5uOx8|rT} zO^23U6XVSoKFj>7LT#Pn!|ASqn|VDCz8m&Az|*WaMJ)sR|1tTj0c*glCjH_b zzRWu{A3YDzpRb@j`|Q+|TiH<+6Ph4wU}5;vw-z&AqY8}7);l@4?kg7Lq!*4|eM&cG z?S)e3=H)X^SU8|97Q@}TEAx{2xRcOU4?0-W%w{I#Mtmo$t48pM@|CJyt!Y`dBur8( ztt@KH$>ytc6CGcjfZi<9os&kpI{i#Fb7QhkFq_?M_B~qM&Kh$~Yya7HI!D{e%1W0j zfyK+mSvzl88nV7|wcageDPt?>F(~XP^YL-^@V?R({E2~uqg}KH>&4nlHCH%91!`VlpMur&+vBXe+VIM-dHDAt_K;biXeeIpqv>)!B8y=01& 
z_gXtl`c0HBVd3jXf+K2qR5?Bb@OYF7t6-u}#OmoYO)0V}o{x z&*-Lg^GM!SSA}7mvzUhp2{CJ+Pgd*O)uj{YasoYj|b5vD|{NQ2OKJb2*n$fx7$7tbd;i z(d@iuYRj@{-)&Xvobp7ry-+%Gu<#fiR=}Qas z5&Ib>{E3Ur6K@xW2%YTV-*_GqO|c(uVseVkZ?|Sx~mo$0C#1)n1+GEUc9lD+?spa{ zxOR;1p;W7JJ+x0<#z((Im!VeG0D9{Q_8LylOTiCoo5pyFi1Vm7Oj2HdRO)EJ?*=W>%Nl_*fRrP4$}Tf%)eZmLc)ka3Wkb4 z00si>u6_~{dv4p}TfJM+pSSwsHM{yaY?*f0y%!cs2zsfN+iJCUBf)w)=pxf7|3cN| z%$p0(1CAx|dodPYGcnmNlJ#7!T*jdYq3CnmL{O~HuhU;@$dV&Wg^;^YJBN~Y~g*LKBzG46EpnEN77yttlFzBd0Za8XM!P3q?G zJ?ndWpGCrttgVMk>qdLqc0Xatp!y<;}-TOczH_gZ`T!3=l z`p0ihmwe#VL8v6!+2A>*`RsId>7zexH@FYgv{(jX!aJgw?dRWIb}%P}io%&=*N(qto#RKf=%aX=sT0%cT}L@rs}> zP!2Fcr#Pi|m!rHXu_^4EjBI}S;;33v`z@QM_8rN4=ytOeaoY^%Y#6Z6VU<(wx;7Sd z)aI@o*JY%Y(sEPbFnVNs`4pQ3TGRiG%hN5NfRzMEf2vR~vzrxXo!L)fGtT0-8}JK@ z|4C2&cIJ#Gwp9RTnUeash{*`PllNTWsFF*FF(@PF=v}U#Xe*f^|gfNx^~<_=C>&>tyJ&7IGL!LXB|B5 zZE(3#@YCIRaf_L*L#7t~mO4I&I~8J!4W)eIe?`jHx+PG|g*N=h>gqP{p;=#%W^HZr zkf6o4HU4?bSGum+U{0ESYjLspq|vq^_~hlS&mMz&>Xj3aOxWyw(HCr!$2FuptYcSn zQ)68y?92_@&6Qk=AG}KGO5}f=k{le%(RU^HuB7&CqRqfzGnBj!+bLJ|IC1#Jr>u?TZHSF8b%{3LYj|$wleuLa-H%xvN9bm6z04hCYSY2@}#)ez0_bkYL4hf zF2Y#((svJn1J<7gExT2I31LN;Q|@|oC2mm5)Jy%#`S$IBxQEgcng{oBUu&_JPk7?w zw7wAR(&@G6^@v-zHzcmfIjlqQKJTN`q5hY%I^3(+`J8-chfdUWCziDB`gpIsMP=42 z+CO!9ICtZfhx6Uh)T^nIVbZPv7rh&W|*!nBJN{STLoD z{F7?`<$M`I3;}WP(gp`i&}{2`tlvQS3~};la6wUx#$x@Qb2<)u=)HK2E?aYb7O&bD=Mz7 z**jj?dh>R|=uY0z`KtU8y}~uwAKR+p&;IFzIdF0NU-j<@4AH(o_t%Q?6udN);B=g^ z9rlsCZ_muN+p(k9NA;e#xQeFlza4qqG5QFxR<4y8D`lC(0F>vl&*f zpPRuI(+g9-x13+V_s!K4)f{xAaIQTtqp;=2fVtj&bB|u_`3H2>WuJE4L59BjgQkM} z?*FbrNs%4eK9m4a2<~NG5(Aem>Ez!_11mdpt8D^@og+U|soF(t*I80BiS|3c{p1_m z4ys!2I4z6$)4VmqaOZ8-jl0spv8B=pHsefbak)h*YFMPLRm`4a_7U=LFC?m+f2)Gg z_r86k`R0w;TMxvJceK)Uw%P_`ShoTlMRwV$BPml25-JY2P8(2Ntp%5vPph+ixuDjg zCcxluN%m^b-tcFZZCYcz%@udOCDC)oU)awxh;X;aMF&uJi{-Tl917>na>CfxMBj<6 z3V5k=GWK+uOQCi|sPl`?IO^=ewd0zbC3ce`b-~$dF7j<^;@%Vur=C}zFBx4@ng8+C z$UJLRUoPugheg}&G5~-3(qqye_>IzPJn;IT>i2JFKyYG(HN^XX!ysB$>EvKWN<)iW 
z;0mFQt-@VRAKk_@&eh=eG9_Du^X(J1Ip-^pwiFfd+NyEU>}uGfk)80*?vl1B{mq?)NM8 z__kMTjAK8vtP3RPUwq%J)7@s26_vhDeDIr+b`G7J}=t(wlkx3ahdwvt|Y5JHNjubTTpnS zafZ9Gc0Ba46x`x;ga5~y8BJHzrT+EN_$N&-HNInZvvRPh3bILUYoM)rA$HDWG!N>hmOSsPn4%_w~E9Pj1Fk$J*Y^q#DYQS+SL z>oz8LRWr2A<@P@;rn!sJoug5zcTZ@Uj=xB87S0P_4&c4J6{7BC9G;S;sk0(hk)j-v zrXD|{{TSg|z~_(bJm#rasQdnyvX;JKVFdiD`vXN8cLkfg+8f1LM;5-=-Bi4woZTb9 z z1*L;A0ku!gtCT&UEY{UO*nV#$;dY%~HwV4dK6@3K0VQ?5FGD(#ZQD?v?z#7ddk=^Y zR+KEpUbf$HZ3D+ReEV)xunXIpXP*^tw&9<;HJM-^>OCLbsPcPXaW}F;=SaI*BgOav zla~aIzRl%sL#FHlvhC{^TF&hbUkdF!V06;aUuWwpM_B#f9curm+8Kdo&rhA+_~1Xk z>L?~jAOA9sN9GxR(n#ZmN#d?oNr$e#uvy^Xa^@6MRy!Xn0}FT|oM13kMSJPpaXGf8 z`e^RDJF1p=w);Z0Kp@>E|i(lV}9KQv2CD@kr`<`nX%W{Un;&J3Bi0@t|O^ zpK(2FcfeCs5&Ap)^L24!Cua{jNlia%H)mT#4@Wn99*|hs+Dqf-yU=l!bdpAX_U=b{ z;4nO$q}lHuU^oQe>Spce%LBl5P-&p=Y6IL)?Pf4ns)Xce$IZubdtuNCLYeVo_6*;KP|%Wr%?Z?Y(R&a z1JF&K?fjg4dCc*I<2fM-(f>Sw%+Q2`{b4* z{J$2U@SrRN;2s>H7YX$O@EKwv11-N3M0Eqxma0NUZg@N-xXORLD zBq|)-gb6AHx&$8j1&)G27nI{c>Yq;-@C2PB1Vih!+Clg@AY=AYKTF7xXsJK@dQ^K)rzg5)B~rKmf4=+6)4S z9ncdH{4OLC;zoG$-^Kj5G{2vLZAm(Ug(46@Y)}vzFrfkgBozwcgMy?&L5xt4R4Ax~ zK!HI3iG_y50(~L~AhCcxg8)m{Lm0TG{g`6yOh7B`S1D{`zHLZKgkNkE4$|B3*&g8SvsArpb)px%a% z3}gaa*sln%W8ClDA-@Pb=dWLYPsRPtAD9xHa2GLw0gw#;j|7S!@{a^cAo7m{Kxp9Z zesKd*19$x^q6#AaNT3EHza;=>2nWveXEI=daQtrxfGPgGQU;O$nB>o!JO~jSi2RlS zm?#4D0KXV0g2-si-7%>0GRO4)2<2JR6*po1i+jTpac2I4VX28a5OQL z445|p)c3DnkRb9~0$}b4Lbpgrh6a(}5&-i@02lS^9Z&>N-@hWD2!uX}@D3;fsPA9D z0I7is`xOC#5>6k2k^xtM0M+&L7vK`04((S2utxl@1K=VMgbtPP4q#0fbr2$;b_n}D z2@#MFsMTK#KOoFm(9E4U`Ae-LD8J4`JOMN(S5z0=U(mzW@_QfV%q?0p%gATS3Wy zIU@*bE`$hRP3V9L5x^QaSn!S<`1?IZL;xrIR|_<7;uDwc!FkqTDUooVHD6dnobUBs zQOfGGkHW_;-`}&dHmtgepOItf4L`~{3-d=;2W>6(*o=G ztd**}EAv{nx$Bm8YSXBE`_srBQruq~QGRXWfdA|G8#KWJe-9hLnecyN0af%b7XRB% zkwjmZRO7T6&+#xRulvTFIAVJB1*wKkWG5k%HmV$t7q993U2M??fu&KC~a-;zbh?o;}cgi!1Mg@mLB zq-)cig8>QgsuDb{_p^)_`6p~@w~oHwZ5kZ;{8E=QN>2h^L|x2L8XvYEtN@Q(YE&^g zZBg>tUZsZP^#?xBv-LrLv<2(t}KOg(*PIw`gFd1@$i zUY>w&)Y}IRtK6v;KCgCOV&o#|I0<4H1GW2K2#3WJwXCj*W+Feh-3B@^X`fB>dXP=- 
zZo&yOm%iC(YtU#Uie-ku6j`Vy*fe~3&dYC@anjRAoxS|1!HaoA=D>=*!!wx7=jPVf zF^{s@UYbTZeXpd{MjHDd`O?pg@7Q_YsnN9u4f&{57l!FeJQRpnw=I1x9ur6U()-)`jP{-1r@P-_b5ci$7}m&T}}F z*UIWjc;PmMdBKJmMe}6_wes!fN8C#USZVz8bC%9%9G!YKT$Lz58KZ^zE|@NIq#!Km zrmU2BW{f1IqN}IKrH9S6eRn^Kehu5@$j;-^r>gBc(c_S8_@P*Gd9@+Emu{OM!gZ)T^&F8Nk=;?!ia^xW^Y)4ijQcr&)r zRCW!4&9tr^=XUyRc;Ux>jT%F=%nWmANk+c1^J8nK69?*^>TbxSu$KRb(9^vy?7U+- z_|?NNgKro-+h%@}JJUo=_IA%r=qI zNhuMZ&OIxx|B#aF{K#~LH2uYbS{{0E5)*EBs47P&!|i)XaeQ3a65d}+i6bY{!>#^w;Ak~G;Bxg!J#Ai7L(=~GKK}+wID}-U+eX)8AsXhVR)#{J3^SdO|HgB+QSZO^80x=U#1W0gelQ=5DU*O|jWmFSRef z$a|MwD!;30Kl(tLpwJ1v@KY?`MF$kbc3kU7ag$%=#%`Q9(O+|0>f_td8+`Dj44gWP z%HQ0h+q>W6j*6?QpW6b3@qI@bMpOrH;p!$H*Etj_0q?!`jJK;+GeS$g??$W{GK1ox zYg%4$rEdGQD%@zs`(Tr=b~yXhmzImU(|r#nfVF8cRfS2SGK*`RMdM|r*B&Dr`r;BqyuoCf1NAr`{0yup zG7b)~)hlcgRaF4-H)AwcBM6JuU%4#WwWwQ6YVk)Yi5Vb4=(i;8}luwcQk!<>!xd&>p1r7uKB zB}OR4a0~`%-Mg(ha3X}WV5H+8@&%982pvy0Z&+E0R$F#SJfdSGe!r5tbwz5I` zjMlAp5ua4bkIIZ8!&e@Za506aB9r`64Cq88v=-K1&G{XUSauoyj^N~sk}CgfKHQ_$ z?_9LtEzKa+TWWZ$8Y8Xelde;TXYUt6s_J+2;SFp0XC=LF6yJYp`B-+F_=a+ltT{=tR8*Y9YH z9#`^ix^l928Xk%nif+_GJADf}tZzHg|HM>3gV8Utt?8z6o~Kvs276OmrWj-0Yo7-% zj!0j!j)8#-k{KPiHi{=%R!5f)Dt4nx{-p2FcHA7|s9tO@vXNm>@%-a(_gtpS{8xz|_<*04L|J`?A7N8Xgem#>=&u_J8El<8*; z#$x)d?$%LVk2X1RO+R({M^l*SI?K`5bw_zToA?U1CyouJJ z@#*N9E37F&3Z3TPZOugKuawg>%jqM;jKW6sv&Gm_Sa)Bm+dhq2M&Bsl+MAI)iPbS= zE%=n7eSZ7lK=$U|myRP-hbdLQc+kG3V!u(fqBh9evmVjocr1bmw!Ey;aM7`ODOhH5 z=_BjD=^fTMGudxVZ!-eEev8ldc1t*jkV2F9v$Z|j}KS1KB&_ShlA=zfZKD@V` z?pR>N=9tXF{H%kMPe`Pmj%v6!8(S3BFShahNRK6g4s18-PvAaYfim;x)BY-X)G^bx z{(-N5t_o$z?W4T+oV8ChSd`Lmt-a?_c2ap_`Qn7XyU3N5xIxpcuHd$P`}Q0?#*s0{ zaQ7AGg6nJ1=9NsT0PYRUb&0`b|NNBui#fft;uyiSovd$nOP5?&(<}EVj9R+b)cZ`V zqx%{&mY+lZ?wio7yUbEdRRRWdSAySsr7F~ghvlb4a}TnqV8govWi+@P-%k3l$=#ur z5r2S_v@E^0a}X_%G-eaiZI?TIICv)7FD~KaG6j3#NOcZoCDQZx-uCPP&WH09-osZW zV%5*9)|gCEtdA>%u?o(fJlpqSaaQKo)Xl6ngG&;#rBtsL_!xdrnVi2`b|;csgz+`M zE5)U=&3Qr5hJjs_9|l;d77IKs^hR|CC(!EDdyHibI%b~DKH1SRBPpXJNc|_Rg{EJB 
zp|waX@gN9X;|sJFy2kg3<)-{hy77B)A#lmclWfh9ZcZ^?I~winikz5qoUs(oW^VpP z?5ZQ2mZvs#X|w8m%E*@Gsj}e^iShmWn z?b7jmM*7Q#R3jB=4jJsQ>9H<+;;P9|s-@unDY6S3Z+PgIHLAk5RLJLNeha^CED*EZf(6?%UU2ZrBJk+z}KEzZ8nXXmTQ?#l9a#?mPPX`;cg zd~Y zSM|0=miJjzu5N0+U(TG`Hn*bjXlm!@kyt}l@#(S0Htr3s$GRL_yJ~YThGt!<&FS;g z3=&6w4>9sq&fmOMRjT(=Cd0SsV%GNBLznB?sSwTkZ#gh3h4d|ZoNOx%8hbbM4MjKj zwybusroOM^>*F6g=6G_x{;BDpiQ->09hkZPdt9GrJ8RkA zZav(E*K;UhidBh089FnCGpY&j#!-Fu+9|zX#&EXmq5y*ww@s*u`<7Jwv-UL)79Hy1 zQ{t9EYNjUe#C@_Rd)^5Qz#WC=I3M?H^UE_e;>sF+Y&_l<$JY1Vp;22m0#$k`;sgUf zM+-+qLGq0aL#=RYY5NZpeP~y8QLLPVvPWErEBYozf_@~J>FboigU04-5*+CdJ}LNH z<}^pS$9kU(F2`Z&5Et`8&dH}Y`JXx}?b}{Gt(48!-!t`2xH@F!y?=e!fX9awxw18n z`VR+(_geqy`z+>PGu&VELF~Uir=bX|QebK=4pzFsIzM3{2uus1ho2;Ox}OUUZghkR zxEmc3ETR1T36TCt=0@j4=kTZ5B>495SB-|j6J2Vgnyh_qn1^0v=7g%}Hjb~_^WLJ# z4J|)ziCB9%ScuWCDz*u-G^ps%=Q`gg+|It4dzkL2+0we!GnoIT1sb0D2^`1T*_shj z$eBG_y5Y`g9Y4d@FRdEM%74s*nR$o)iNg&qHZhejEvZW_N8!(Jmz8o_cV49xoKM!5 zNe#$Axi-3e)Dl-6w{UF^DouOP=i48XR=kp&QS)iX%aF2HLP4dm3w^;KU>{m-45Z6}ZJ`BV(gV{L=m?esU!WA%`;AARZepdx0YugH*I^igpU%iIG_=j%gK0V?pu zU`mnmucbL@%NnhYTUgLL+R#kX6(k$b>Qa~O%CG)5m@vfaaw#|5bEj!4H#bJRRuEot z6#MQ*BGPK6t4(pl)Y*K^|hXzA(GeK1&tMU4IQ z*JZ{qCYAUHiuvn@75B2McBe>n!!$J3S!P9IWveoQsVrP)v+=!kRzF2hm!oCE}yJ_}osF*C&9`&N@-aRt+ zyQV5%`R2c!zj6KWo%rSwsafe)@5H+M#!d~eB%f-cs(uC(vbSmdjGp@UxbElKDx5)@svu?{S{PTDbn`p>f>wQ42++_V)AZZUF*Jem#x=k z!$Q<2_Nw5wD@id(`MLYSF=hR=p#q=U^aYe(=*1=Y3YZRk(=bQ89Q{U@;Y@epqJp3oO^_S}@SmOO%s>XPhDGWV8shmD1I`sV1vv&hln_E4U37IAW zEKR+$RLpoDF+npGCxsVuT8vwr{1W9|ls`^sID6E0eEkmJBi3lQA(yzezEu-?U4A#O zh!}E_M}xklY3v(QZY^IUmG zv?QhJ$uq`KNQGVqPkUmME$zE5t`3Xz8#_4^W`dsC+SF;r6q=TNX zHYkPJEKPKI3(UaNp0Z2^>npH#D|F~y^c-(5N8KN8v_95ef+)a+y~E&kZJ&I?c9Zq- zIn`-Cy~pvPIxpK_P+5eEW!_zT?dTyv z37Vs{jue*B^ZQRS-buL?Cw0b*>g2Yh4>hV5F9l!gBzwJ&i!n(6my_kJaI zwMU(jrnCoNhb(cOF`5{5TYZ*$KTGV4t$15^xX6jsH9Z(C^YtE2s-kZP)Ccd6zkGjh z&-$4GsaQFB&fMj0p5rIW0~doRicQb*y1!d!Fdm8O6bN`;PPk%Gr1hL6pHF)Z+cZ&`$LNWnyLl@gjT=urL}LbgGahs1r>0yS z)$vpFWK5ARny@_5j=|QY#(27ktgvz5Dko%p{ISu3(}6YNK^x2lfwnE|Mvr=V9;zof 
ziqndjNO`>Uv32gd+^SA>=OUObZ%=)90gl@%n>yIO zVpu4a_|4PkMl1HPpF)oF@u8dLiuCLg4jSG(kta7cM#_b`e#q7_uveT;OJ^3F%qxGh zV7nSty{_i5Ay;s8m*RoISG%trYh-zbVL(+rj+CbxRcDZYJ)&(~R%pyyaJ^NBS~FTu zVkuOmg+5yzmoBDR>?(3@*b1DabkN~(wV0s0Db=g0YOy_EG#uy>H1qSPv)GqET5zUJ z{}Ap>Ve~OOZ^W)fot}NX^Javw)v*n6s$MI+{b=^Nz2!p{-vd2Ulwrdt~r)^E6n)|ep7waCDa7V#V0TC~hl~|=~a9-r#;i@N$2mHKG2JN|8 zD58v2vc2wappg4P^Hrc;pm%gz>D2@Nb}0}!};ZU+0sXA zw~Rg?Kg{T?$*XErZKCLLyx_^`^S0^DHx#Ab-@30n_v1CQHc9Kw&}-tE%^2C|eUdNT zcU3ckSxDwsLG6L6QP@Sr%BMG-au(zjyE%sa@b!jMJA{%w5v!KN)i@wDnCW zXN;zH!N=-M|M#U&SKeOVxp-J%drTJHy$Bcns0rN({;Bs?al?(>4iXXvUv>;y*Y~!L z9ZS-2!_BBA5LYcnb^cb|7e9SbQ@jS*!t%l zKR<2ekLaw>lKv}vpF`1k4`sS+1spLTB zx#$l@u^y-QjEBAYxNEQIVQj}vYw6pFZdQ8ZkKv}w<3fCiqDCBS7uF8VzNWVc*TQ7j zUy{FilfR;y#bd4ENUs0$V&SvBEXJquzP;Mz@Yva$zGkD>F+<%lR580#q(f(KXBz%> zR~l8LNsqlVZEdp@m5J7lY#x`I@FTC1oi1s{BAM@)oDZ8m@V0*dwJf@CvP9ALRFl=I zg@Kcl6*t#|_QVX%yWH=7JE*UoRo6{-?}g*S+neUiUD@7c84q5p-9V^HyI<3vy_>zX zr5=U#^pU9c~@o? z_gbA5*zR+-iRytnYb4#xq|vUrKH&tZjT+XFJsWuyLv{sH@MW4vditP zbG)O@@ny@0SN$$4@db!e%}q@5jB^K{Gq%IpB?u>6b+<|!QeNAsh)`%#3|#Ee8h3K3 zXsKIQ8AKPCM`-E>HqNY>JlsDpS-bD}C$H^yr(2qDx>p`HTW|R)Sbh3|;=+zZuaCIw z=TnbGknN8jXK(j+OrciM*qJH2V)jLQ<^yl;D5{0KPJi=&{N&=sZu7l4J%4J$zg)mY z!eIZNUs@Wpcs%7{s$JQ03IGz{+|4?wq;bbmTkdmC&6jP|eG-Oo%tFzdp_EmHJv*HO z-eg%iI1i(rxc}Ll%@e;nrL)HM>->A~kF8Zq z?b~Om^+HxZ(gj|2!B56_VsCXiBO8VbKL$pwak$odt4)S6^D2yMIDJRF>9 zj&Vm&`tn%Fd<}2|0hN z^M5TO{s*V`AMo2xFx>xFq@fxa(op;-(C{w-hu}m1p8r7FNWeWn zPLB8!^6yW{|6TvWQ3SNbpV^@aIEbGilmY<$ARHe7tO3Och+^mgfLng9YybcxVTA(_ zp$hs}S^&1d5YU5o^#2W%-2V!3^#3P10SNbB6$tqomj}=t#JIfwAmD!UB;eoviTs0p z`%NKW-~NgGgM9mk@-N)mAHi8a;TsUV1HeN7RPHAt1j7FS$lR|80Kx$1+|LM*7y%%3 zzajwCLiiv~cprdT2%ql=5dd8QVANm90Ez^FDSt%(iUdGKe?}l!4PkW+dIxA8g0Rv> zhyZagza;>oAOO(x*E;|rLl}(_l7V~(!v-QuN|2Ae107%u0L1`~{Ob>>DjHyO9C-er zL1-oDkH_N|4G$nXKu#hW3kHPo)M7$A)?_yrGhFZqQOuEtQRQ_g31xm@CZEQrif@T zEDkJe64UTVI8^aO>3}aG=Z8c=@F?PR0NYF^GZI7gE&%007(t?U@kl%bZ6l%q)E5K- zBckC^a0vcIL<9N>X&Vs@55yx&hsQ&(Hj;D@1e}^GP6v?Y zB<%}B!3n@Yl5~W%VjQN}QO6M`IwUBT+guKv|Nf!;|Vg 
z42_19>jC&QNyZOHfT|JYfybaAj3yBc11yBJ4)7Q(1V1BAhr&Q8P9ho}3xjZ2L^QDe zO;-1KECC=$ln#qTL(YkahR34F>J*E`km(v8iy_kkpal>#kAxqNOlBMkOP((dSPjX$ zI4mCOaf$iikPuFhmsJ8{+Pg%;4{VYm)fWK#ChY@24?u3yz+*#WKVtbne@>PM z*i%K82WU3Zb^}mn2$fCD4@`kbeFWGfMqXZUS0I@zpyMLr2h^ENKG5fpwi`4gB-KAK zxFz#pV2lfGa3Gc$i-9&UkkD{sx&elcV3{l(82gawGhj#BAA!LOhOB*& z7(DdBp18bVEI?KcFcu(BhXts5(s#-I6%vOemj#D~b}hLz)Lzc_r0nFlvJKC=km6c1n`ThsKh%5wJGW zIT+a2NN)RJ_(x_lU^GeA=b^xqiL^byW>j+d!0?JxU(jGMK-#`&(AkpH$lDhUdT-Kv zf%hY`Ei~v<$oPTD3)#D14+v=+p~>fE0B!y z1?gBGY_lcT4XB$TwN(trf=oU%o@_jX!4TGFi1Yv~e3JQb;5x}{5CgWylIjcS%ESMB0TT;S`M{KpEMF|SU1Gr;kxUjG3fdJ$TwWZ| zd@>sOcmsm^~l-{4+h_)<$%@>N&OHW2^#o6`TF@- zJG\n\t\n\t\t\n \n\t\t\n\t\t\n\t\t\n\t\t\n\t\n\t\n
\n
\n
\n\t
\n

Toxicological information

\n
\n
\n \n
\n
\n
\n \n \n
\n
\n

\n Administrative data\n

\n\t\t
\n\t\t\t\n\t\t
\n
\n
Confidentiality
\n\t\t\t
\n\t\t\n [Empty]\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n
Justification
\n\t\t\t
\n\n\t\t\n [Empty]\n\t\t\n\n\t\t\t\t\n\t\t\t
\n\t\t
\n\t\t
\n
Use restricted to selected regulatory programmes
\n\t\t\t
\n\t\t\n [Empty]\n\t\t\n\t\t\t
\n\t\t
\n

\n Workers - Hazard via inhalation route\n

\n

\n Systemic effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t DNEL (Derived No Effect Level)\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n77\t\t\n\t\t\t mg/m\xc2\xb3\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\t repeated dose toxicity\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n3
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\t NOAEC\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t low hazard (no threshold derived)\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDNEL extrapolated from long term DNEL\n\t\t
\n
\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Local effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t DNEL (Derived No Effect Level)\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n293\t\t\n\t\t\t mg/m\xc2\xb3\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\t irritation (respiratory tract)\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n3
\n\t\t
\n\t
\n\t\t
\n\t\t\tDNEL extrapolated from long term DNEL\n\t\t
\n
\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\t NOAEC\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Workers - Hazard via dermal route\n

\n

\n Systemic effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t DNEL (Derived No Effect Level)\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n180\t\t\n\t\t\t mg/kg bw/day\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\t repeated dose toxicity\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n12
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\t NOAEL\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDNEL extrapolated from long term DNEL\n\t\t
\n
\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Local effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Workers - Hazard for the eyes\n

\n

\n Local effects\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t low hazard (no threshold derived)\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Additional information - workers\n

\n\t
\n\t\t
\n\t\t\t\n\t\t
\n\t\t\t
\n

B.5.11.3 Worker-DNEL short-term inhalation route

 

Peak airborne exposure to ethylbenzene vapour may cause local respiratory irritation.

 

 

B.5.11.3.1 DNEL based on local respiratory irritation

 

The dose descriptor is obtained from a study on 12 normal human volunteers exposed for 4 h to fluctuating concentrations of ethylbenzene. The exposure consisted of 8 peaks of 200 ppm separated by troughs of 10 ppm with an integrated mean of 98 ppm. No significant difference was found when comparing the reporting of subjective nasal irritation under these conditions with that at a constant exposure to 10 ppm. Similarly, inflammatory biomarkers in nasal secretion were not affected under these exposure conditions. In addition 12 volunteers with self-reported Multiple Chemical Sensitivity (sMCS) were investigated. Nasal irritation reported by the sMCS subjects was again not significantly increased when comparing the fluctuating exposure condition against constant exposure at 10 ppm. But reported irritation was significantly higher in sMCS subjects in comparison to the normal subjects under the high exposure conditions (van Thriel et al., 2003).

 

Thus the dose descriptor is the NOAEC of 98 ppm for 4 h of exposure and 200 ppm for short term exposures of 15 min. Irritation is a concentration specific effect, it is therefore not necessary to modify the dose descriptor to take account of differences in breathing rates between volunteers at rest and active workers. The starting point is therefore 200 ppm for short term exposures and 98 ppm of a prolonged duration up to several hours for the normal worker population. It is pointed out that this starting point will lead to a very conservative DNEL since the NOAEC is the highest concentration investigated. The "true" NOAEC is not known but certainly is higher.

 

There is a further aspect strongly indicating that the NOAEC of 98 ppm for local respiratory irritation is highly conservative: after short term exposure of volunteers (one sniff from a squeeze bottle) the odour detection threshold was in the range of 20 ppm with a broad individual variation. In contrast, anosmics that will only react to (true) trigeminal irritation reported a nasal pungency threshold of 10100 ppm (Cometto-Muniz, 1994). As in the study of van Thriel et al. (2003) subjective reporting was the endpoint recorded, this subjective reporting may not represent true sensory irritation but may be confounded by olfaction. (Problems associated with defining the threshold for olfaction are demonstrated by the recent study of Cometto-Muniz and Abraham (2009). With a more sensitive approach as compared to the 1994-study, the odour threshold for alkylbenzenes decreased by several orders of magnitude being 6 ppb for ethylbenzene.)

 


Table B.1: Assessment factors and DNEL calculation for worker-DNEL short-term inhalation local effects

Uncertainties

AF

Justification

Interspecies differences

-

The starting point is obtained from human data so it is not necessary to apply a factor to take account of interspecies differences.

Intraspecies differences

3

There are no data to quantify variability in susceptibility to the irritant effects of ethylbenzene in the human population. Since irritant effects relate to the concentration at the target site it is not necessary to apply a factor to take account of toxicokinetic differences. For workers REACH (2008) proposes an assessment factor of 5. However, following a review of data on respiratory irritants in human volunteers, ECETOC (2003, 2010) concluded that variations between individuals were small. In addition, the population exposed in the workplace is highly homogeneous and the health of the work force is typically good (healthy worker effect). The analysis of assessment factors conducted by ECETOC also showed that estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states supports use of an assessment factor of 3 to account for intraspecies variability within workers.

The factor of 3 proposed by ECETOC will be used for this study in humans.

Differences in duration of exposure

1

It is not necessary to apply a factor to take account of duration of exposure.

Dose response and endpoint specific/severity issues

1

The starting point is a NOAEC. In this study, normal volunteers exposed to 200 ppm for short term exposure peaks and to 98 ppm for 4 h did not report irritation. In contrast, slight irritation was reported by a selected group of sMCS persons with a high subjective sensitivity.

Quality of database

1

The quality of the database for this endpoint is adequate and the study was conducted to modern standards. It is therefore not necessary to apply a factor to take account of deficiencies in the quality of the data.

Overall assessment factor: 3

Endpoint specific DNEL: 200/3 = 67 ppm for short term exposure peaks (15 min)

Or 98/3 = 33 ppm for single exposures over several hours

B.5.11.3.2 Selection of worker-DNEL short-term inhalation

 

A DNEL of 67 ppm was calculated for respiratory tract irritation for short-term exposures of about 15 min. For single exposures of a few hours a DNEL of 33 ppm was derived.

 

 

B.5.11.4 Worker-DNEL long-term inhalation route

 

For long-term repeated exposure the potential of ethylbenzene to induce ototoxicity and developmental toxicity should be discussed. For these endpoints the available human data do not provide sufficient information to allow a human dose descriptor to be identified. A dose descriptor for these endpoints can be identified from animal data. Since the dose-response relationship and evidence base for each endpoint is different it is not clear which is the critical endpoint for risk assessment of long-term repeated exposure. It will therefore be necessary to calculate separate endpoint specific DNELs for each effect to identify the critical long-term DNEL.

 

 


B.5.11.4.1 Endpoint specific DNEL for ototoxicity

 

Repeated inhalation exposure to ethylbenzene vapor was irreversibly ototoxic in rats (Gagnaire et al., 2007). Auditory dysfunction characterised by an elevation of hearing thresholds was localised in the mid frequencies and corresponded to the loss of cochlear outer hair cells, the sensory cells in the inner ear. Hearing loss and cell damage increased with exposure concentrations. In this 90 day rat inhalation study (6 hours/day, 6 day/week) the NOAEC for ototoxicity was extrapolated to be 114 ppm (500 mg/m³) based on destruction of outer hair cells in row 3 occurring already at 200 ppm. In contrast, no effect on hearing ability as indicated by audiometric thresholds was observed at 200 ppm. According to several case reports where hearing deficits in humans occupationally exposed to organic solvents or from people after solvent abuse are described (for review cf Risk Assessment Reports on toluene and styrene), these rat data are taken to be relevant for humans.

 

Thus, this extrapolated NOAEC of 114 ppm (500 mg/m³) is taken as dose descriptor for DNEL derivation of repeated dose toxicity.

 

The extrapolated NOAEC of 114 ppm (500 mg/m³) from the rat is (1) multiplied with a factor of 0.45 (for rat absorption percentage of 45%), divided by a divisor of 0.65 (for human absorption percentage after inhalation of 65%) and (2) multiplied by a factor of 6.7/10 for activity-driven differences of respiratory volumes in workers. Further differences regarding the experimental inhalation duration (6 hours/day, 6 days/week) and the working conditions (8 hours/day, 5 days/week) are not considered, because they roughly balance each other.

 

The calculation results in an adjusted inhalation starting point of 232 mg/m³ (500 x 0.45/0.65 x 6.7/10).

 

In the overall assessment it has also be taken into consideration that the starting point to define ototoxicity is not auditory dysfunction but histopathological effects on the outer hair cells of the cochlea. Numerous investigations have shown that audiometric hearing deficits occur at higher exposure concentrations than (small) losses of hair cells in the outer row 3 of the cochlea. While the audiometric effects define the impact of ototoxicity for workers, the use of histopathological effects for the starting point implies a further conservative factor.

 

Hearing loss is not a specific effect for ethylbenzene but has also been found with other aromatic solvents like styrene, toluene, or xylenes. Detailed mechanistic studies, especially with styrene, have demonstrated that ototoxicity is exerted by the unmetabolized parent chemical. Studies in rats with styrene and toluene showed that hearing loss occurs within a few days of the start of exposure and although it does not increase in severity with prolongation of exposure, the initial effect is irreversible. According to Gagnaire et al. (2007) ethylbenzene led to maximal hearing impairment after about 4 weeks without further deterioration by prolongation of exposure.

 

Table B.2: Assessment factors and DNEL calculation for worker DNEL long-term inhalation for ototoxicity

Uncertainties

AF

Justification

Interspecies differences

1

For interspecies variability the assessment factors proposed by ECETOC (2003, 2010) are used. The dose descriptor was obtained from an inhalation study and is being used to derive an inhalation DNEL. It is therefore not necessary to apply an allometric scaling factor to take account of differences in basal metabolic rates between animals and humans. According to ECETOC the additional assessment factor of 2.5 to quantify other differences between animals and humans that could affect interspecies extrapolation is scientifically not justified and a factor of 1 is appropriate. On this basis the default factor of 1 to account for other species differences will be applied.

Intraspecies differences

3

There are no data to quantify variability in susceptibility to the effects of long-term exposure to ethylbenzene in the human population. However the population exposed in the workplace is highly homogeneous and the health of the work force is typically good (healthy worker effect) while metabolic differences due to genetic polymorphisms do not automatically require an increased assessment factor since alternative pathways of elimination are often present (ECETOC, 2003, 2010). The analysis of assessment factors conducted by ECETOC also showed that estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states supports use of an assessment factor of 3 to account for intraspecies variability within workers. The default factor of 3 for workers as proposed by ECETOC will therefore be used to take account of intraspecies variability. In total there is a combined inter- and intraspecies assessment factor of 3 that takes into account that the target tissue (outer hair cells in the cochlea, anatomical structure and blood supply of the cochlea) is identical in rats and humans. Furthermore, not hearing deficits but histopathological alterations of the cochlea were used as starting point. As histopathology is more sensitive than the physiological effects this is a conservative starting point per se.

Differences in duration of exposure

1

The dose descriptor was obtained from a 90-day inhalation study. Although the DNEL is to be used to assess long-term repeated exposure there is strong evidence showing that the ototoxic effects of ethylbenzene have already occurred within the first four weeks of initial exposure and did not progress in severity with further exposure. On this basis, the duration of this study is adequate for the endpoint being studied and it is not necessary to apply a factor to take account of differences in duration of exposure.

Dose response and endpoint specific/severity issues

1

As an extrapolated NOAEC for the most sensitive endpoint, i.e. histopathological alterations of cochlear hair cells, was used an additional assessment factor is not warranted. The study encompassed a range of concentrations and durations of exposure and provided reliable information on the dose-response relationship and severity of effect.

Quality of database

1

The findings from the key study are supported by findings from several additional studies with similar aromatic solvents conducted to modern regulatory standards by separate groups of researchers. Therefore it is not necessary to apply an additional factor.  

Overall assessment factor:  3

Endpoint specific DNEL: 232/3 = 77 mg/m³ (8-hours) corresponding to 17.5 ppm

B.5.11.4.2 Endpoint specific DNEL for developmental toxicity

 

For DNEL derivation with regard to developmental toxicity two study types can be taken into consideration: (multi) generation and prenatal toxicity studies.

 

B.5.11.4.2a DNEL based on (multi) generation studies

 

For definition of a DNEL two studies are available: A guideline 2-generation study with an indication of slight foetotoxicity (reduced foetal body weight and occasional increases in skeletal variations) in the presence of maternal toxicity with a NOAEC for foetal and maternal toxicity of 500 ppm and a preceding probe 1-generation reproduction toxicity study with a NOAEC of 100 ppm based on increased postnatal mortality and body weight gain depression in the offspring. As a general strategy, the guideline 2-generation study with a large number of experimental animals should take precedence over the probe study with a more limited number of animals. Furthermore, the specific life stages relevant for derivation of a DNEL long-term for workers have to be defined.

 

While exposure during pregnancy and lactation (via the milk) might occur in female workers under rare circumstances (e.g. if maternity leave is not taken), it is not possible that inhalation exposure at the workplace will occur in humans directly after weaning. Therefore, only effects found in offspring until the date of weaning should be taken into consideration to derive a long-term DNEL for workers, but not effects occurring in the offspring exposed by direct inhalation after weaning. A detailed evaluation of the probe 1-generation study leads to a NOAEC for the offspring until weaning of 500 ppm including body weight development and postnatal mortality. This corresponds to the NOAEC of the guideline 2-generation study.

 

The NOAEC for developmental effects relevant for the exposure situation at the workplace is 500 ppm with an exposure schedule of 6h/d, 7 d/week. Transformation to the workplace situation (8h/d; 5d/week) would lead to 525 ppm (500 x 6/8 x 7/5), corresponding to 2300 mg/m³. This NOAEC from the rat is (1) multiplied with a factor of 0.45 (for rat absorption percentage of 45 %) and divided by 0.65 (for human absorption percentage of 65 %) and (2) multiplied by a factor of 0.67 for activity differences of respiratory volumes in workers.

 

The adjusted starting point is therefore 1070 mg/m³.

 


Table B.3: Assessment factors and DNEL calculation for worker DNEL long-term inhalation for developmental toxicity (generation studies)

Uncertainties

AF

Justification

Interspecies differences

1

For interspecies variability the assessment factors proposed by ECETOC (2003, 2010) are used. The dose descriptor was obtained from an inhalation study and is being used to derive an inhalation DNEL. It is therefore not necessary to apply an allometric scaling factor to take account of differences in basal metabolic rates between animals and humans. According to ECETOC the additional assessment factor of 2.5 to quantify other differences between animals and humans that could affect interspecies extrapolation is scientifically not justified and a factor of 1 is appropriate. On this basis the default factor of 1 to account for other species differences will be applied.

Intraspecies differences

3

There are no data to quantify variability in susceptibility to the effects of long-term exposure to ethylbenzene in the human population. However the population exposed in the workplace is highly homogeneous and the health of the work force is typically good (healthy worker effect) while metabolic differences due to genetic polymorphisms do not automatically require an increased assessment factor since alternative pathways of elimination are often present (ECETOC, 2003, 2010). The analysis of assessment factors conducted by ECETOC also showed that estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states supports use of an assessment factor of 3 to account for intraspecies variability within workers.

The default factor of 3 for workers as proposed by ECETOC will therefore be used to take account of intraspecies variability.

Differences in duration of exposure

1

The NOAEC was derived from the probe 1-generation and the 2-generation study starting from exposure of young adult animals of the F0 generation up to weaning of the F1 generation. This represents a worst case scenario for workers being exposed from the start of their working life, during pregnancy up to the end of the breast feeding period without maternity leave. An assessment factor of 1 is therefore appropriate.

Dose response and endpoint specific/severity issues

1

As 500 ppm was a clear NOAEC for developmental effects in F1 offspring before weaning and start of direct inhalation exposure, an assessment factor is not warranted in this respect.

Quality of database

1

The key study was conducted to modern regulatory standards and was adequately reported. On this basis the quality of the database is not considered to contribute uncertainty and it is therefore not necessary to apply an additional factor.

Overall assessment factor:  3

Endpoint specific DNEL: 1070/3 = 357 mg/m³ (8-hour) corresponding to 81 ppm

B.5.11.4.2b DNEL based on prenatal toxicity studies

 

The derivation of the DNEL is based on the prenatal toxicity study in rats of Saillenfait et al. (2003) at exposure concentrations of 100, 500, 1000, and 2000 ppm during gestation days 6-20 with a NOAEC of 500 ppm for an exposure regime of 6 h/d throughout gestation days 6-20.

 

This NOAEC has to be adjusted by a factor of 6/8 to account for different daily exposure durations leading to 375 ppm. The dams were exposed every day during pregnancy and not at 5 d/week, as would have been workers. Due to the short duration of pregnancy in rats this fact is not taken into account by a factor of 7/5, leading to a more conservative DNEL derivation. The NOAEC of 375 ppm from the rat is (1) multiplied with a factor of 0.45 (for rat absorption percentage of 45 %) and divided by 0.65 (for human absorption percentage of 65 %) and (2) multiplied by a factor of 0.67 for activity differences of respiratory volumes in workers.

 

The adjusted starting point is therefore 174 ppm.


Table B.4: Assessment factors and DNEL calculation for worker DNEL long-term inhalation for developmental toxicity (prenatal toxicity studies)

Uncertainties

AF

Justification

Interspecies differences

1

For interspecies variability the assessment factors proposed by ECETOC (2003, 2010) are used. The dose descriptor was obtained from an inhalation study and is being used to derive an inhalation DNEL. It is therefore not necessary to apply an allometric scaling factor to take account of differences in basal metabolic rates between animals and humans. According to ECETOC the additional assessment factor of 2.5 to quantify other differences between animals and humans that could affect interspecies extrapolation is scientifically not justified and a factor of 1 is appropriate. On this basis the default factor of 1 to account for other species differences will be applied.

Intraspecies differences

3

There are no data to quantify variability in susceptibility to the effects of long-term exposure to ethylbenzene in the human population. However the population exposed in the workplace is highly homogeneous and the health of the work force is typically good (healthy worker effect) while metabolic differences due to genetic polymorphisms do not automatically require an increased assessment factor since alternative pathways of elimination are often present (ECETOC, 2003, 2010). The analysis of assessment factors conducted by ECETOC also showed that estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states supports use of an assessment factor of 3 to account for intraspecies variability within workers.

The default factor of 3 for workers as proposed by ECETOC will therefore be used to take account of intraspecies variability.

Differences in duration of exposure

1

The NOAEC was derived from a prenatal toxicity study with an exposure regime from gestation day 6 up to the end of pregnancy at gestation day 20. An assessment factor of 1 is therefore appropriate.

Dose response and endpoint specific/severity issues

1

As 500 ppm was a clear NOAEC for developmental effects, an assessment factor is not warranted in this respect.

Quality of database

1

The key study was conducted to modern regulatory standards and was adequately reported. On this basis the quality of the database is not considered to contribute uncertainty and it is therefore not necessary to apply an additional factor.

Overall assessment factor:  3

Endpoint specific DNEL: 174/3 = 58 ppm (8-hour) corresponding to 255 mg/m³

B.5.11.4.3 Selection of worker-DNEL long-term inhalation

 

The most sensitive endpoint-specific DNEL is that for ototoxicity. This endpoint is relevant for workers. So the endpoint specific DNEL for ototoxicity is identified as the worker-DNEL long-term inhalation.

 

The worker DNEL long-term inhalation route is 17.5 ppm (8-hr TWA).

 

 

B.5.11.5 Worker-DNEL long-term dermal route

 

No information is available for systemic toxicity after repeated dermal exposure. Therefore the extrapolated NOAEC of 114 ppm (500 mg/m³) based on ototoxicity from the 90 day inhalation rat study is taken for the dermal route, ototoxicity being the critical health effect leading to the lowest worker-DNEL long-term by inhalation.

 

Expressed as (external) dose the value of 500 mg/m³ corresponds to 190 mg/kg/day (500 mg/m³ x default respiratory volume for the rat for 8 hours of 0.38 m³/kg). With a rat absorption percentage of 45% after inhalation the internal starting point corresponds to 86 mg/kg/day (190 mg/kg/day x 0.45).

 

It is necessary to convert this internal inhalative dose to an equivalent external dermal dose. There are several studies investigating dermal penetration of ethylbenzene but most of them have severe deficits, either using preparations not relevant for the possible exposure scenarios of ethylbenzene or not allowing for evaporation after dermal contact. The OECD test guideline for skin absorption studies describes a device that should be used for volatile chemicals. Only Susten et al. (1990) used an experimental design similar to that described by OECD. With hairless mice they determined a dermal penetration rate of 3.61%. It is generally recognized that the skin barrier in hairless mice is less efficient than that of humans. Therefore a penetration rate of 4% as used in this calculation is on the conservative side.

 

With a penetration rate of 4% the internal inhalation dose corresponds to an external dermal dose of

86 x 100/4 = 2150 mg/kg/d.

 

Table B.5: Assessment factors and DNEL calculation for worker DNEL long-term dermal for ototoxicity

Uncertainties

AF

Justification

Interspecies differences

4

For interspecies variability the assessment factors proposed by ECETOC (2003, 2010) are used. The starting point is derived from the internal rat dose in mg/kg/d. Therefore, the allometric scaling factor of 4 is used to take account of differences in basal metabolic rates between animals and humans. According to ECETOC the additional assessment factor of 2.5 to quantify other differences between animals and humans that could affect interspecies extrapolation is scientifically not justified and a factor of 1 is appropriate. On this basis the default factor of 1 to account for other species differences will be applied.

Intraspecies differences

3

There are no data to quantify variability in susceptibility to the effects of long-term exposure to ethylbenzene in the human population. However the population exposed in the workplace is highly homogeneous and the health of the work force is typically good (healthy worker effect) while metabolic differences due to genetic polymorphisms do not automatically require an increased assessment factor since alternative pathways of elimination are often present (ECETOC, 2003, 2010). The analysis of assessment factors conducted by ECETOC also showed that estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states supports use of an assessment factor of 3 to account for intraspecies variability within workers.

The default factor of 3 for workers as proposed by ECETOC will therefore be used to take account of intraspecies variability.

In total there is a combined inter- and intraspecies assessment factor of 12 that takes into account that the target tissue (outer hair cells in the cochlea, anatomical structure and blood supply of the cochlea) is identical in rats and humans. Furthermore, not hearing deficits but histopathological alterations of the cochlea were used as starting point. As histopathology is more sensitive than the physiological effects this is a conservative starting point per se.

Differences in duration of exposure

1

The dose descriptor was obtained from a 90-day inhalation study. Although the DNEL is to be used to assess long-term repeated exposure there is strong evidence showing that the ototoxic effects of ethylbenzene have already occurred within the first four weeks of initial exposure and did not progress in severity with further exposure. On this basis, the duration of this study is adequate for the endpoint being studied and it is not necessary to apply a factor to take account of differences in duration of exposure.

Dose response and endpoint specific/severity issues

1

As an extrapolated NOAEC for the most sensitive endpoint, i.e. histopathological alterations of cochlear hair cells, was used an additional assessment factor is not warranted. The study encompassed a range of concentrations and durations of exposure and provided reliable information on the dose-response relationship and severity of effect.

Quality of database

1

The findings from the key study are supported by findings from several additional studies with similar aromatic solvents conducted to modern regulatory standards by separate groups of researchers. Therefore it is not necessary to apply an additional factor.  

Overall assessment factor:  12

Endpoint specific DNEL: 2150/12 = 180 mg/kg/d (8-hours) corresponding to 12500 mg/person/d

The worker DNEL long-term dermal route for systemic effects is 180 mg/kg/d. As a skin irritation DNEL is not quantifiable from the data this DNEL does not address the potential for local irritation.

 

 

 

 


\t\t\t
\n
\n

\n General Population - Hazard via inhalation route\n

\n

\n Systemic effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t DNEL (Derived No Effect Level)\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n15\t\t\n\t\t\t mg/m\xc2\xb3\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\t repeated dose toxicity\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n5
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\t NOAEC\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t low hazard (no threshold derived)\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDNEL extrapolated from long term DNEL\n\t\t
\n
\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Local effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDNEL extrapolated from long term DNEL\n\t\t
\n
\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n General Population - Hazard via dermal route\n

\n

\n Systemic effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDNEL extrapolated from long term DNEL\n\t\t
\n
\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Local effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n General Population - Hazard via oral route\n

\n

\n Systemic effects\n

\n

\n Long term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t DNEL (Derived No Effect Level)\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n1.6\t\t\n\t\t\t mg/kg bw/day\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\t repeated dose toxicity\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n40
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\t NOAEL\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for differences in duration of exposure\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Acute/short term exposure\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t no hazard identified\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tMost sensitive endpoint\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tRoute of original study\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n

\n DNEL related information\n

\n\t
\n\t\t
\n\t\t\tDNEL derivation method\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tOverall assessment factor (AF)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDNEL extrapolated from long term DNEL\n\t\t
\n
\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tDose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tModified dose descriptor starting point\n\t\t
\n\t
\n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n
\n
\n\t\t
\n\t\t
\n\t\t\tValue\n\t\t
\n\t\t
\n \n\t\t\n\t\t\n [Empty]\n\t\t\n\t\t\n \n
\n\t\t
\n\t
\n\t\t
\n\t\t\tExplanation for the modification of the dose descriptor starting point\n\t\t
\n\t\t\t
\n [Empty] \t\t\t
\n
\n\t
\n\t\t
\n\t\t\tAF for dose response relationship\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for interspecies differences (allometric scaling)\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for other interspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for intraspecies differences\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for the quality of the whole database\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n\t
\n\t\t
\n\t\t\tAF for remaining uncertainties\n\t\t
\n
\n\t\t\n [Empty]\n\t\t\n
\n\t\t
\n
\n\t\t
\n\t\t\tJustification\n\t\t
\n
\n

\n\t\t\n [Empty]\n\t\t\n

\n
\n
\n

\n Explanation for hazard conclusion\n

\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n General Population - Hazard for the eyes\n

\n

\n Local effects\n

\n\t
\n\t\t
\n\t\t\tHazard assessment conclusion\n\t\t
\n\t
\n\t\t\n\t\t\t low hazard (no threshold derived)\n\t\t\n
\n
\n\t
\n\t\t
\n\t\t\tExplanation for hazard conclusion\n\t\t
\n\t\t\t
\n [Not publishable] \t\t\t
\n
\n

\n Additional information - General Population\n

\n\t
\n\t\t
\n\t\t\t\n\t\t
\n\t\t\t
\n

B.5.11.6 General population DNEL long-term inhalation by exposure via the environment

As discussed in section B.5.11.4 long-term repeated exposure to ethylbenzene has the potential to cause ototoxicity and devlopmental toxicity.

For ototoxicity:

Repeated inhalation exposure to ethylbenzene vapor was irreversibly ototoxic in rats (Gagnaire et al., 2007).Auditory dysfunction characterised by an elevation of hearing thresholds was localised in the mid frequencies and corresponded to the loss of cochlear outer hair cells, the sensory cells in the inner ear. Hearing loss and cell damage increased with exposure concentrations.In this 90 day rat inhalation study (6 hours/day, 6 day/week) the NOAEC for ototoxicity was extrapolated to be 114 ppm (500 mg/m\xc2\xb3) based on destruction of outer hair cells in row 3 occurring already at 200 ppm. In contrast, no effect on hearing ability as indicated by audiometric thresholds was observed at 200 ppm. According to several case reports where hearing deficits in humans occupational exposed to organic solvents or from people after solvent abuse are described (for review cf Risk Assessment Reports on toluene and styrene), this rat data are taken to be relevant for humans.

 

Thus, this extrapolated NOAEC of 114 ppm (500 mg/m\xc2\xb3) is taken as dose descriptor for DNEL derivation of repeated dose toxicity for general population (consumer).

 

The extrapolated NOAEC of 114 ppm (500 mg/m\xc2\xb3) from the rat is (1) multiplied with a factor of 0.45 (for rat absorption percentage of 45%), divided by a divisor of 0.65 (for human absorption percentage after inhalation of 65%).A factor for activity-driven differences of respiratory volumes is not necessary for the general population (via environment). Further differences regarding the experimental inhalation duration (6 hours/day, 6 days/week) and the environmental exposure conditions (24 hours/day, 7 days/week) have to be taken into consideration by factors of 6/24 and 6/7.

The calculation results in an adjusted inhalation starting point of

114 x 0.45: 0.65 x 6/24 x 6/7 = 17ppm

In the overall assessment it has also be taken into consideration that the starting point to define ototoxicity is not auditory dysfunction but histopathological effects on the outer hair cells of the cochlea. Numerous investigations have shown that audiometric hearing deficits occur at higher exposure concentrations than (small) losses of hair cells in the outer row 3 of the cochlea. While the audiometric effects define the impact of ototoxicity for humans, the use of histopathological effects for the starting point implies a further conservative factor.

 

Hearing loss is not a specific effect for ethylbenzene but has also been found with other aromatic solvents like styrene, toluene, or xylenes. Detailed mechanistic studies, especially with styrene, have demonstrated that ototoxicity is exerted by the unmetabolized parent chemical. Studies in rats with styrene and toluene showed that hearing loss occurs within a few days of the start of exposure and although it does not increase in severity with prolongation of exposure, the initial effect is irreversible. According to Gagnaire et al. (2007) ethylbenzene led to maximal hearing impairment after about 4 weeks without further deterioration by prolongation of exposure.

 

Table B.6: Assessment factors and DNEL calculation for general population (consumer) (DIY) DNEL long-term inhalation for ototoxicity

Uncertainties

AF

Justification

Interspecies differences

1

For interspecies variability the assessment factors proposed by ECETOC (2003, 2010) are used. The dose descriptor was obtained from an inhalation study and is being used to derive an inhalation DNEL. It is therefore not necessary to apply an allometric scaling factor to take account of differences in basal metabolic rates between animals and humans. According to ECETOC the additional assessment factor of 2.5 to quantify other differences between animals and humans that could affect interspecies extrapolation is scientifically not justified and a factor of 1 is appropriate. On this basis the default factor of 1 to account for other species differences will be applied.

Intraspecies differences

5

There are no data to quantify variability in susceptibility to the effects of long-term exposure to ethylbenzene in the human population. However an analysis of assessment factors conducted by ECETOC (2003, 2010) showed that metabolic differences due to genetic polymorphisms do not to automatically require an increased assessment factor since alternative pathways of elimination are often present. This consideration, together with estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states, supports use of an assessment factor of 5 to account for intraspecies variability within the general population.

The default factor of 5 for the general population as proposed by ECETOC (2003, 2010) will therefore be used to take account of intraspecies variability.

In total there is a combined inter- and intraspecies assessment factor of 5 that takes into account that the target tissue (outer hair cells in the cochlea, anatomical structure and blood supply of the cochlea) is identical in rats and humans. Furthermore, not hearing deficits but histopatological alterations of the cochlea were used as starting point. As histopathology is more sensitive than the physiological effects this is a conservative starting point per se.

Differences in duration of exposure

1

The dose descriptor was obtained from a 90-day inhalation study. Although the DNEL is to be used to assess long-term repeated exposure there is strong evidence showing that the ototoxic effects of ethylbenzene have already occurred within the first four weeks of initial exposure and did not progress in severity with further exposure. On this basis, the duration of this study is adequate for the endpoint being studied and it is not necessary to apply a factor to take account of differences in duration of exposure.

Dose response and endpoint specific/severity issues

1

As an extrapolated NOAEC for the most sensitive endpoint, i.e. histopathological alterations of cochlear hair cells, was used an additional assessment factor is not warranted. The study encompassed a range of concentrations and durations of exposure and provided reliable information on the dose-response relationship and severity of effect.

Quality of database

1

The findings from the key study are supported by findings from several additional studies with similar aromatic solvents conducted to modern regulatory standards by separate groups of researchers. Therefore it is not necessary to apply an additional factor.  

Overall assessment factor:  5

Endpoint specific DNEL: 17/5 = 3.4 ppm (24-hours) corresponding to 15 mg/m\xc2\xb3.

For derivation of a worker-DNEL long-term based on reproductive effects in the a multi-generation study, effects observed for the time between weaning and puberty did not apply. On the other hand, effects occurring during pre- and postnatal phases have to be taken into account for exposures via the environment.

 

According to section B.5.11.4.2a for definition of a DNEL two studies are available: A guideline 2-generation study with an indication of slight foetotoxicity (reduced foetal body weight and occasional increases in skeletal variations) in the presence of maternal toxicity with a NOAEC for foetal and maternal toxicity of 500 ppm and a preceding probe 1-generation reproduction toxicity study with a NOAEC of 100 ppm based on increased postnatal mortality and body weight gain depression in the offspring. This NOAEC of 100 ppm was governed by effects occurring after weaning when the offspring was directly exposed by inhalation. As a general strategy, the guideline 2-generation study with a large number of experimental animals should have precedence over the probe study with a more limited number of animals. But in order to derive a DNEL as conservative as possible for the general population exposed via the environment, the NOAEC of the probe 1-generation study will be used as starting point.

 

In this probe reproductive toxicity study rats were exposed for 6 hours per day, 7 days per week. Since animals were exposed for 6 hours per day, whereas the exposure via the environment is continuous, it is necessary to adjust the starting point by a factor of 6/24. This NOAEC of 100 ppm from the rat is further multiplied with a factor of 0.45 (for rat absorption percentage of 45 %) and divided by 0.65 (for human absorption percentage of 65 %). An adjustment for physical activity is not necessary.

 

The corrected starting point is therefore

100 x 6/24 x 0.45/0.65= 17.3 ppm.

 

Table B.7: Assessment factors and calculation for general population DNEL long-term inhalation via environment for developmental toxicity based on generation studies

Uncertainties

AF

Justification

Interspecies differences

1

For interspecies variability the assessment factors proposed by ECETOC (2003, 2010) are used. The dose descriptor was obtained from an inhalation study and is being used to derive an inhalation DNEL. It is therefore not necessary to apply an allometric scaling factor to take account of differences in basal metabolic rates between animals and humans. According to ECETOC the additional assessment factor of 2.5 to quantify other differences between animals and humans that could affect interspecies extrapolation is scientifically not justified and a factor of 1 is appropriate. On this basis the default factor of 1 to account for other species differences will be applied.

Intraspecies differences

5

There are no data to quantify variability in susceptibility to the effects of long-term exposure to ethylbenzene in the human population. However an analysis of assessment factors conducted by ECETOC (2003, 2010) showed that metabolic differences due to genetic polymorphisms do not to automatically require an increased assessment factor since alternative pathways of elimination are often present. This consideration, together with estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states, supports use of an assessment factor of 5 to account for intraspecies variability within the general population.

The default factor of 5 for humans via environment as proposed by ECETOC will therefore be used to take account of intraspecies variability.

Differences in duration of exposure

1

The NOAEC was derived from the 2-generation study covering the whole reproductive cycle. An assessment factor of 1 is therefore appropriate.

Dose response and endpoint specific/severity issues

1

The NOAEC of 100 ppm was derived from generation studies covering the whole reproductive cycle. Therefore no additional assessment factor is necessary.

Quality of database

1

The key study was conducted to modern regulatory standards and was adequately reported. On this basis the quality of the database is not considered to contribute uncertainty and it is therefore not necessary to apply an additional factor.     

Overall assessment factor:  5

Endpoint specific DNEL: 17.3/5 = 3.5 ppm

For prenatal effects the derivation of the DNEL is based on the prenatal toxicity study in rats of Saillenfait et al. (2003) at exposure concentrations of 100, 500, 1000, and 2000 ppm during gestation days 6-20 with a NOAEC of 500 ppm for an exposure regime of 6 h/d throughout gestation days 6-20.

 

This NOAEC has to be adjusted by a factor of 6/24 to account for different daily exposure durations leading to 125 ppm. The dams were exposed every day during pregnancy. The NOAEC of 125 ppm from the rat is further multiplied with a factor of 0.45 (for rat absorption percentage of 45 %) and divided by 0.65 (for human absorption percentage of 65 %).

 

The adjusted starting point is therefore 86.5 ppm.

 

Table B.8: Assessment factors and DNEL calculation for general population (via environment) DNEL long-term inhalation for developmental toxicity (prenatal toxicity studies)

Uncertainties

AF

Justification

Interspecies differences

1

For interspecies variability the assessment factors proposed by ECETOC (2003, 2010) are used. The dose descriptor was obtained from an inhalation study and is being used to derive an inhalation DNEL. It is therefore not necessary to apply an allometric scaling factor to take account of differences in basal metabolic rates between animals and humans. According to ECETOC the additional assessment factor of 2.5 to quantify other differences between animals and humans that could affect interspecies extrapolation is scientifically not justified and a factor of 1 is appropriate. On this basis the default factor of 1 to account for other species differences will be applied.

Intraspecies differences

5

There are no data to quantify variability in susceptibility to the effects of long-term exposure to ethylbenzene in the human population. However an analysis of assessment factors conducted by ECETOC (2003, 2010) showed that metabolic differences due to genetic polymorphisms do not to automatically require an increased assessment factor since alternative pathways of elimination are often present. This consideration, together with estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states, supports use of an assessment factor of 5 to account for intraspecies variability within the general population.

The default factor of 5 for the general population as proposed by ECETOC will therefore be used to take account of intraspecies variability.

Differences in duration of exposure

1

The NOAEC was derived from a prenatal toxicity study with an exposure regime from gestation day 6 up to the end of pregnancy at gestation day 20. An assessment factor of 1 is therefore appropriate.

Dose response and endpoint specific/severity issues

1

As 500 ppm was a clear NOAEC for developmental effects, an assessment factor is not warranted in this respect.

Quality of database

1

The key study was conducted to modern regulatory standards and was adequately reported. On this basis the quality of the database is not considered to contribute uncertainty and it is therefore not necessary to apply an additional factor.

Overall assessment factor:  5

Endpoint specific DNEL: 174/5 = 17.3 ppm (8-hour) corresponding to 76 mg/m\xc2\xb3

B.5.11.6h. Selection of DNEL long-term inhalation for the general population (via environment)

In conclusion a DNEL long-term for the general population by exposure via the environment of 3.4 ppm is proposed based on ototoxic effects. This DNEL is very similar to that derived from toxic effects observed in generation studies being 3.5 ppm.

 

B.5.11.7 General population (via environment) DNEL long-term oral route

 

For deriving a DNEL long-term oral route for the general population (via environment) two approaches should be considered:

starting from the lowest DNEL long-term inhalation route (ref section B.5.11.7b) for the general population (via environment)

starting from the rat 90-day oral study (Mellert et al., 2007) thereby obviating a route-to-route extrapolation.

 

a) starting from the DNEL long-term inhalation route for the general population (via environment)

 

The lowest general population DNEL long-term for the inhalation route via environment was calculated to be 3.4 ppm, corresponding to 15 mg/m\xc2\xb3 (see section B.5.11.7.b) based on ototoxicity. A respiratory volume of 20 m\xc2\xb3/person/d is used for the general population (ECHA, 2008) and an uptake by inhalation of 65%. This leads to an internal body burden of

15 x 20 x 0.65 = 195 mg/person/d, corresponding to 2.8 mg/kg/d.

 

After oral exposure up to 92% of the ethylbenzene dose could be recovered as urinary metabolites in rabbits (El Masry et al., 1952) and 84% in urine and feces of rats (Climie et al., 1983); therefore 100% absorption by oral exposure is taken for humans as default.

 

Thus, for the general population (via environment) the DNEL long-term oral derived from that by inhalation is 2.8 mg/kg/d.

 

b) starting from the rat 90-day oral study

 

An increase in liver and kidney weights of rats and mice without histopathological alterations has been found in several studies. These changes are most probably related to enzyme induction. The NOAEL in a guideline oral 90 day study with rats was 75 mg/kg bw/d (LOAEL 250 mg/kg bw/d) based on indications for a mild regenerative anemia and liver changes indicative of microsomal enzyme induction.

 

The animals were dosed daily by gavage. A 84% oral absorption is used for rats (Climie et al., 1983) and 100% for humans as conservative default leading to an internal dose of 75 x 0.84 = 63 mg/kg/d as starting point.

 

Table B.9: Assessment factors and DNEL calculation for the general population (via environment) long-term oral (based on rat oral study)

Uncertainties

AF

Justification

Interspecies differences

4

For interspecies variability the assessment factors proposed by ECETOC (2003, 2010) are used. The dose descriptor was obtained from an oral study and is being used to derive an inhalation DNEL. Therefore an allometric scaling factor of 4 is used to take account of differences in basal metabolic rates between animals and humans. According to ECETOC the additional assessment factor of 2.5 to quantify other differences between animals and humans that could affect interspecies extrapolation is scientifically not justified and a factor of 1 is appropriate. On this basis the default factor of 1 to account for other species differences will be applied.

Intraspecies differences

5

There are no data to quantify variability in susceptibility to the effects of long-term exposure to ethylbenzene in the human population. However an analysis of assessment factors conducted by ECETOC (2003, 2010) showed that metabolic differences due to genetic polymorphisms do not to automatically require an increased assessment factor since alternative pathways of elimination are often present. This consideration, together with estimates of the upper 95th percentile of the distribution of variability in toxicokinetic and toxicodynamic parameters for populations of different ages, genders and disease states, supports use of an assessment factor of 5 to account for intraspecies variability within the general population.

The default factor of 5 for the general population as proposed by ECETOC will therefore be used to take account of intraspecies variability.

Differences in duration of exposure

2

The NOAEL was obtained from a 90-day oral rat study. For extrapolation to chronic exposure an assessment factor of 2 is therefore appropriate according to REACH (2008) and ECETOC (2003, 2010).

Dose response and endpoint specific/severity issues

1

As 75 mg/kg/d was a clear NOAEL with a dose response relationship for toxicologically relevant effects at higher doses, an assessment factor is not warranted in this respect.

Quality of database

1

The key study was conducted to modern regulatory standards and was adequately reported. On this basis the quality of the database is not considered to contribute uncertainty and it is therefore not necessary to apply an additional factor.

Overall assessment factor:  40

Endpoint specific DNEL: 63/40 = 1.6 mg/kg/d

In conclusion, the DNEL long-term for the general population (via environment) by oral exposure is 1.6 mg/kg/d.

\t\t\t
\n
\n
\n
\n
\n
\n
\n\n
\n \n
\n \n\n' \ No newline at end of file diff --git a/docs/REFACTORING.md b/docs/REFACTORING.md deleted file mode 100644 index 8d08801..0000000 --- a/docs/REFACTORING.md +++ /dev/null @@ -1,211 +0,0 @@ -# Refactoring Summary - -## Completed: Phase 1 & 2 - -### Phase 1: Critical Bug Fixes ✅ - -**Fixed Issues:** - -1. **[base_classes.py](src/pif_compiler/classes/models.py)** (now renamed to `models.py`) - - Fixed missing closing parenthesis in `StringConstraints` annotation (line 24) - - File renamed to `models.py` for clarity - -2. **[pif_class.py](src/pif_compiler/classes/pif_class.py)** - - Removed unnecessary `streamlit` import - - Fixed duplicate `NormalUser` import conflict - - Fixed type annotations for optional fields (lines 33-36) - - Removed unused imports - -3. **[classes/__init__.py](src/pif_compiler/classes/__init__.py)** - - Created proper module exports - - Added docstring - - Listed all available models and enums - -### Phase 2: Code Organization ✅ - -**New Structure:** - -``` -src/pif_compiler/ -├── classes/ # Data Models -│ ├── __init__.py # ✨ NEW: Proper exports -│ ├── models.py # ✨ RENAMED from base_classes.py -│ ├── pif_class.py # ✅ FIXED: Import conflicts -│ └── types_enum.py -│ -├── services/ # ✨ NEW: Business Logic Layer -│ ├── __init__.py # Service exports -│ ├── echa_service.py # ECHA API (merged from find.py) -│ ├── echa_parser.py # HTML/Markdown/JSON parsing -│ ├── echa_extractor.py # High-level extraction -│ ├── cosing_service.py # COSING integration -│ ├── pubchem_service.py # PubChem integration -│ └── database_service.py # MongoDB operations -│ -└── functions/ # Utilities & Legacy - ├── _old/ # 🗄️ Deprecated files (moved here) - │ ├── echaFind.py # → Merged into echa_service.py - │ ├── find.py # → Merged into echa_service.py - │ ├── echaProcess.py # → Split into echa_parser + echa_extractor - │ ├── scraper_cosing.py # → Copied to cosing_service.py - │ ├── pubchem.py # → Copied to pubchem_service.py - │ └── mongo_functions.py # → Copied to 
database_service.py - ├── html_to_pdf.py # PDF generation utilities - ├── pdf_extraction.py # PDF processing utilities - └── resources/ # Static resources (logos, templates) -``` - ---- - -## Key Improvements - -### 1. **Separation of Concerns** -- **Models** (`classes/`): Pure data structures with Pydantic validation -- **Services** (`services/`): Business logic and external API calls -- **Functions** (`functions/`): Legacy code, will be gradually migrated - -### 2. **ECHA Module Consolidation** -Previously scattered across 3 files: -- `echaFind.py` (246 lines) - Old search implementation -- `find.py` (513 lines) - Better search with type hints -- `echaProcess.py` (947 lines) - Massive monolith - -Now organized into 3 focused modules: -- `echa_service.py` (~513 lines) - API integration (from `find.py`) -- `echa_parser.py` (~250 lines) - Data parsing/cleaning -- `echa_extractor.py` (~350 lines) - High-level extraction logic - -### 3. **Better Logging** -- Changed from module-level `logging.basicConfig()` to proper logger instances -- Each service has its own logger: `logger = logging.getLogger(__name__)` -- Prevents logging configuration conflicts - -### 4. 
**Improved Imports** -Services can now be imported cleanly: -```python -# Old way -from src.func.echaFind import search_dossier -from src.func.echaProcess import echaExtract - -# New way -from pif_compiler.services import search_dossier, echa_extract -``` - ---- - -## Migration Guide - -### For Code Using Old Imports - -**ECHA Functions:** -```python -# Before -from src.func.find import search_dossier -from src.func.echaProcess import echaExtract, echaPage_to_md, clean_json - -# After -from pif_compiler.services import ( - search_dossier, - echa_extract, - echa_page_to_markdown, - clean_json -) -``` - -**Data Models:** -```python -# Before -from classes import Ingredient, PIF -from base_classes import ExpositionInfo - -# After -from pif_compiler.classes import Ingredient, PIF, ExpositionInfo -``` - -**COSING/PubChem:** -```python -# Before -from functions.scraper_cosing import cosing_search -from functions.pubchem import pubchem_dap - -# After (when ready) -from pif_compiler.services.cosing_service import cosing_search -from pif_compiler.services.pubchem_service import pubchem_dap -``` - ---- - -## Next Steps (Phase 3 - Not Done Yet) - -### Configuration Management -- [ ] Create `config.py` for MongoDB credentials, API keys -- [ ] Use environment variables (.env file) -- [ ] Separate dev/prod configurations - -### Testing -- [ ] Add pytest setup -- [ ] Unit tests for models (Pydantic validation) -- [ ] Integration tests for services -- [ ] Mock external API calls - -### Streamlit App -- [ ] Create `app.py` entry point -- [ ] Organize UI components -- [ ] Connect to services layer - -### Database -- [ ] Document MongoDB schema -- [ ] Add migration scripts -- [ ] Consider adding SQLAlchemy for relational DB - -### Documentation -- [ ] API documentation (docstrings → Sphinx) -- [ ] User guide for PIF creation workflow -- [ ] Developer setup guide - ---- - -## Files Changed - -### Modified: -- `src/pif_compiler/classes/models.py` (renamed, fixed) -- 
`src/pif_compiler/classes/pif_class.py` (fixed imports/types) -- `src/pif_compiler/classes/__init__.py` (new exports) - -### Created: -- `src/pif_compiler/services/__init__.py` -- `src/pif_compiler/services/echa_service.py` -- `src/pif_compiler/services/echa_parser.py` -- `src/pif_compiler/services/echa_extractor.py` -- `src/pif_compiler/services/cosing_service.py` -- `src/pif_compiler/services/pubchem_service.py` -- `src/pif_compiler/services/database_service.py` - -### Moved to Archive: -- `src/pif_compiler/functions/_old/echaFind.py` (merged into echa_service.py) -- `src/pif_compiler/functions/_old/find.py` (merged into echa_service.py) -- `src/pif_compiler/functions/_old/echaProcess.py` (split into echa_parser + echa_extractor) -- `src/pif_compiler/functions/_old/scraper_cosing.py` (copied to cosing_service.py) -- `src/pif_compiler/functions/_old/pubchem.py` (copied to pubchem_service.py) -- `src/pif_compiler/functions/_old/mongo_functions.py` (copied to database_service.py) - -### Kept (Active): -- `src/pif_compiler/functions/html_to_pdf.py` (PDF utilities) -- `src/pif_compiler/functions/pdf_extraction.py` (PDF utilities) -- `src/pif_compiler/functions/resources/` (Static files) - ---- - -## Benefits - -✅ **Cleaner imports** - No more relative path confusion -✅ **Better testing** - Services can be mocked easily -✅ **Easier debugging** - Smaller, focused modules -✅ **Type safety** - Proper type hints throughout -✅ **Maintainability** - Clear separation of concerns -✅ **Backward compatible** - Old code still works - ---- - -**Date:** 2025-01-04 -**Status:** Phase 1 & 2 Complete ✅ diff --git a/docs/test_summary.md b/docs/test_summary.md deleted file mode 100644 index a8a696e..0000000 --- a/docs/test_summary.md +++ /dev/null @@ -1,295 +0,0 @@ -# ECHA Services Test Suite Summary - -## Overview - -Comprehensive test suites have been created for all three ECHA service modules, following a bottom-up approach from lowest to highest level dependencies. 
- -## Test Files Created - -### 1. test_echa_parser.py (Lowest Level) -**Location**: `tests/test_echa_parser.py` - -**Coverage**: Tests for HTML/Markdown/JSON processing functions - -**Test Classes**: -- `TestOpenEchaPage` - HTML page opening (remote & local) -- `TestEchaPageToMarkdown` - HTML to Markdown conversion -- `TestMarkdownToJsonRaw` - Markdown to JSON conversion (skipped if markdown_to_json not installed) -- `TestNormalizeUnicodeCharacters` - Unicode normalization -- `TestCleanJson` - JSON cleaning and validation -- `TestIntegrationParser` - Full pipeline integration tests - -**Total Tests**: 28 tests -- 20 tests for core parser functions -- 5 tests for markdown_to_json (conditional) -- 2 integration tests -- 1 test with known Unicode encoding issue (needs fix) - -**Key Features**: -- Mocks external dependencies (requests, file I/O) -- Tests Unicode handling edge cases -- Validates data cleaning logic -- Tests comparison operator conversions (>, <, >=, <=) - -**Known Issues**: -- Unicode literal encoding in test strings (line 372, 380) - use `chr()` instead of `\uXXXX` -- Missing `markdown_to_json` dependency (tests skip gracefully) - -###2. 
test_echa_service.py (Middle Level) -**Location**: `tests/test_echa_service.py` - -**Coverage**: Tests for ECHA API interaction and search functions - -**Test Classes**: -- `TestGetSubstanceByIdentifier` - Substance API search -- `TestGetDossierByRmlId` - Dossier retrieval with Active/Inactive fallback -- `TestExtractSectionLinks` - Section link extraction with validation -- `TestParseSectionsFromHtml` - HTML parsing for multiple sections -- `TestGetSectionLinksFromIndex` - Remote index.html fetching -- `TestGetSectionLinksFromFile` - Local file parsing -- `TestSearchDossier` - Main search workflow -- `TestIntegrationEchaService` - Real API integration tests (marked @pytest.mark.integration) - -**Total Tests**: 36 tests -- 30 unit tests with mocked APIs -- 3 integration tests (require internet, marked for manual execution) - -**Key Features**: -- Comprehensive API mocking -- Tests nested section bug fix (parent vs child section links) -- Tests URL encoding, error handling, fallback logic -- Tests local vs remote workflows -- Integration tests for real formaldehyde data - -**Testing Approach**: -- Unit tests run by default (fast, no external deps) -- Integration tests require `-m integration` flag - -### 3. 
test_echa_extractor.py (Highest Level) -**Location**: `tests/test_echa_extractor.py` - -**Coverage**: Tests for high-level extraction orchestration - -**Test Classes**: -- `TestSchemas` - Data schema validation -- `TestJsonToDataframe` - JSON to pandas DataFrame conversion -- `TestDfWrapper` - DataFrame metadata addition -- `TestEchaExtractLocal` - DuckDB cache querying -- `TestEchaExtract` - Main extraction workflow -- `TestIntegrationEchaExtractor` - Real data integration tests (marked @pytest.mark.integration) - -**Total Tests**: 32 tests -- 28 unit tests with full mocking -- 4 integration tests (require internet) - -**Key Features**: -- Tests both RepeatedDose and AcuteToxicity schemas -- Tests local cache (DuckDB) integration -- Tests key information extraction -- Tests error handling at each pipeline stage -- Tests DataFrame vs JSON output modes -- Validates metadata addition (substance, CAS, timestamps) - -**Testing Strategy**: -- Mocks entire pipeline: search → parse → convert → clean → wrap -- Tests local_search and local_only modes -- Tests graceful degradation (returns key_infos on main extraction failure) - -## Test Architecture - -``` -test_echa_parser.py (Data Transformation) - ↓ -test_echa_service.py (API & Search) - ↓ -test_echa_extractor.py (Orchestration) -``` - -### Dependency Flow -1. **Parser** (lowest) - No dependencies on other ECHA modules -2. **Service** (middle) - Depends on Parser for some functionality -3. **Extractor** (highest) - Depends on both Service and Parser - -### Mock Strategy -- **Parser**: Mocks `requests`, file I/O, `os.makedirs` -- **Service**: Mocks `requests.get` for API calls, HTML content -- **Extractor**: Mocks entire pipeline chain (search_dossier, open_echa_page, etc.) 
- -## Running the Tests - -### Run All Tests -```bash -uv run pytest tests/test_echa_*.py -v -``` - -### Run Specific Module -```bash -uv run pytest tests/test_echa_parser.py -v -uv run pytest tests/test_echa_service.py -v -uv run pytest tests/test_echa_extractor.py -v -``` - -### Run Only Unit Tests (Fast) -```bash -uv run pytest tests/test_echa_*.py -v -m "not integration" -``` - -### Run Integration Tests (Requires Internet) -```bash -uv run pytest tests/test_echa_*.py -v -m integration -``` - -### Run With Coverage -```bash -uv run pytest tests/test_echa_*.py --cov=pif_compiler.services --cov-report=html -``` - -## Test Coverage Summary - -### Functions Tested - -#### echa_parser.py (5/5 = 100%) -- ✅ `open_echa_page()` - Remote & local file opening -- ✅ `echa_page_to_markdown()` - HTML to Markdown with route formatting -- ✅ `markdown_to_json_raw()` - Markdown parsing & JSON conversion -- ✅ `normalize_unicode_characters()` - Unicode normalization -- ✅ `clean_json()` - Recursive cleaning & validation - -#### echa_service.py (8/8 = 100%) -- ✅ `search_dossier()` - Main entry point with local file support -- ✅ `get_substance_by_identifier()` - Substance API search -- ✅ `get_dossier_by_rml_id()` - Dossier retrieval with fallback -- ✅ `_query_dossier_api()` - Helper for API queries -- ✅ `get_section_links_from_index()` - Remote HTML fetching -- ✅ `get_section_links_from_file()` - Local HTML parsing -- ✅ `parse_sections_from_html()` - HTML content parsing -- ✅ `extract_section_links()` - Individual section extraction with validation - -#### echa_extractor.py (4/4 = 100%) -- ✅ `echa_extract()` - Main extraction function -- ✅ `echa_extract_local()` - DuckDB cache queries -- ✅ `json_to_dataframe()` - JSON to DataFrame conversion -- ✅ `df_wrapper()` - Metadata addition - -**Total Functions**: 17/17 tested (100%) - -## Edge Cases Covered - -### Parser -- Empty/malformed HTML -- Missing sections -- Unicode encoding issues (â€, superscripts) -- Comparison operators (>, <, >=, 
<=) -- Nested structures -- [Empty] value filtering -- "no information available" filtering - -### Service -- Substance not found -- No dossiers (active or inactive) -- Nested sections (parent without direct link) -- Input type mismatches -- Network errors -- Malformed API responses -- Local vs remote file paths - -### Extractor -- Substance not found -- Missing scraping type pages -- Empty sections -- Empty cleaned JSON -- Local cache hits/misses -- Key information extraction -- DataFrame filtering (null Effect levels) -- JSON vs DataFrame output modes - -## Dependencies Required - -### Core Dependencies (Already in project) -- pytest -- pytest-mock -- pytest-cov -- beautifulsoup4 -- pandas -- requests -- markdownify -- pydantic - -### Optional Dependencies (Tests skip if missing) -- `markdown_to_json` - Required for markdown→JSON conversion tests -- `duckdb` - Required for local cache tests -- Internet connection - Required for integration tests - -## Test Markers - -### Custom Markers (defined in conftest.py) -- `@pytest.mark.unit` - Fast tests, no external dependencies -- `@pytest.mark.integration` - Tests requiring real APIs/internet -- `@pytest.mark.slow` - Long-running tests -- `@pytest.mark.database` - Tests requiring database - -### Usage in ECHA Tests -- Unit tests: Default (run without flags) -- Integration tests: Require `-m integration` -- Skipped tests: Auto-skip if dependencies missing - -## Known Issues & Improvements Needed - -### 1. Unicode Test Encoding (test_echa_parser.py) -**Issue**: Lines 372 and 380 have truncated Unicode escape sequences -**Fix**: Replace `\u00c2\u00b²` with `chr(0xc2) + chr(0xb2)` -**Priority**: Medium - -### 2. Missing markdown_to_json Dependency -**Issue**: Tests skip if not installed -**Fix**: Add to project dependencies or document as optional -**Priority**: Low (tests gracefully skip) - -### 3. 
Integration Test Data -**Issue**: Real API tests may fail if ECHA structure changes -**Fix**: Add recorded fixtures for deterministic testing -**Priority**: Low - -### 4. DuckDB Integration -**Issue**: test_echa_extractor local cache tests mock DuckDB -**Fix**: Create actual test database for integration testing -**Priority**: Low - -## Test Statistics - -| Module | Total Tests | Unit Tests | Integration Tests | Skipped (Conditional) | -|--------|-------------|------------|-------------------|-----------------------| -| echa_parser.py | 28 | 26 | 2 | 7 (markdown_to_json) | -| echa_service.py | 36 | 33 | 3 | 0 | -| echa_extractor.py | 32 | 28 | 4 | 0 | -| **TOTAL** | **96** | **87** | **9** | **7** | - -## Next Steps - -1. **Fix Unicode encoding** in test_echa_parser.py (lines 372, 380) -2. **Run full test suite** to verify all unit tests pass -3. **Add markdown_to_json** to dependencies if needed -4. **Run integration tests** manually to verify real API behavior -5. **Generate coverage report** to identify any untested code paths -6. **Document test patterns** for future service additions -7. **Add CI/CD integration** for automated testing - -## Contributing - -When adding new functions to ECHA services: - -1. **Write tests first** (TDD approach) -2. **Follow existing patterns**: - - One test class per function - - Mock external dependencies - - Test happy path + edge cases - - Add integration tests for real API behavior -3. **Use appropriate markers**: `@pytest.mark.integration` for slow tests -4. 
**Update this document** with new test coverage - -## References - -- Main documentation: [docs/echa_architecture.md](echa_architecture.md) -- Test patterns: [tests/test_cosing_service.py](../tests/test_cosing_service.py) -- pytest configuration: [pytest.ini](../pytest.ini) -- Test fixtures: [tests/conftest.py](../tests/conftest.py) diff --git a/docs/testing_guide.md b/docs/testing_guide.md deleted file mode 100644 index 5b3d135..0000000 --- a/docs/testing_guide.md +++ /dev/null @@ -1,767 +0,0 @@ -# Testing Guide - Theory and Best Practices - -## Table of Contents -- [Introduction](#introduction) -- [Your Current Approach vs. Test-Driven Development](#your-current-approach-vs-test-driven-development) -- [The Testing Pyramid](#the-testing-pyramid) -- [Key Concepts](#key-concepts) -- [Real-World Testing Workflow](#real-world-testing-workflow) -- [Regression Testing](#regression-testing---the-killer-feature) -- [Code Coverage](#coverage---how-much-is-tested) -- [Best Practices](#best-practices-summary) -- [Practical Examples](#practical-example-your-workflow) -- [When Should You Write Tests](#when-should-you-write-tests) -- [Getting Started](#your-next-steps) - ---- - -## Introduction - -This guide explains the theory and best practices of software testing, specifically for the PIF Compiler project. It moves beyond ad-hoc testing scripts to a comprehensive, automated testing approach. - ---- - -## Your Current Approach vs. Test-Driven Development - -### What You Do Now (Ad-hoc Scripts): - -```python -# test_script.py -from cosing_service import cosing_search - -result = cosing_search("WATER", mode="name") -print(result) # Look at output, check if it looks right -``` - -**Problems:** -- ❌ Manual checking (is the output correct?) -- ❌ Not repeatable (you forget what "correct" looks like) -- ❌ Doesn't catch regressions (future changes break old code) -- ❌ No documentation (what should the function do?) 
-- ❌ Tedious for many functions - ---- - -## The Testing Pyramid - -``` - /\ - / \ E2E Tests (Few) - /----\ - / \ Integration Tests (Some) - /--------\ - / \ Unit Tests (Many) - /____________\ -``` - -### 1. **Unit Tests** (Bottom - Most Important) - -Test individual functions in isolation. - -**Example:** -```python -def test_parse_cas_numbers_single(): - """Test parsing a single CAS number.""" - result = parse_cas_numbers(["7732-18-5"]) - assert result == ["7732-18-5"] # ← Automated check -``` - -**Benefits:** -- ✅ Fast (milliseconds) -- ✅ No external dependencies (no API, no database) -- ✅ Pinpoint exact problem -- ✅ Run hundreds in seconds - -**When to use:** -- Testing individual functions -- Testing data parsing/validation -- Testing business logic calculations - ---- - -### 2. **Integration Tests** (Middle) - -Test multiple components working together. - -**Example:** -```python -def test_full_cosing_workflow(): - """Test search + clean workflow.""" - raw = cosing_search("WATER", mode="name") - clean = clean_cosing(raw) - assert "cosingUrl" in clean -``` - -**Benefits:** -- ✅ Tests real interactions -- ✅ Catches integration bugs - -**Drawbacks:** -- ⚠️ Slower (hits real APIs) -- ⚠️ Requires internet/database - -**When to use:** -- Testing workflows across multiple services -- Testing API integrations -- Testing database interactions - ---- - -### 3. **E2E Tests** (End-to-End - Top - Fewest) - -Test entire application flow (UI → Backend → Database). - -**Example:** -```python -def test_create_pif_from_ui(): - """User creates PIF through Streamlit UI.""" - # Click buttons, fill forms, verify PDF generated -``` - -**When to use:** -- Testing complete user workflows -- Smoke tests before deployment -- Critical business processes - ---- - -## Key Concepts - -### 1. **Assertions - Automated Verification** - -**Old way (manual):** -```python -result = parse_cas_numbers(["7732-18-5/56-81-5"]) -print(result) # You look at: ['7732-18-5', '56-81-5'] -# Is this right? 
Maybe? You forget in 2 weeks. -``` - -**Test way (automated):** -```python -def test_parse_multiple_cas(): - result = parse_cas_numbers(["7732-18-5/56-81-5"]) - assert result == ["7732-18-5", "56-81-5"] # ← Computer checks! - # If wrong, test FAILS immediately -``` - -**Common Assertions:** -```python -# Equality -assert result == expected - -# Truthiness -assert result is not None -assert "key" in result - -# Exceptions -with pytest.raises(ValueError): - invalid_function() - -# Approximate equality (for floats) -assert result == pytest.approx(3.14159, rel=1e-5) -``` - ---- - -### 2. **Mocking - Control External Dependencies** - -**Problem:** Testing `cosing_search()` hits the real COSING API: -- ⚠️ Slow (network request) -- ⚠️ Unreliable (API might be down) -- ⚠️ Expensive (rate limits) -- ⚠️ Hard to test errors (how do you make API return error?) - -**Solution: Mock it!** -```python -from unittest.mock import Mock, patch - -@patch('cosing_service.req.post') # Replace real HTTP request -def test_search_by_name(mock_post): - # Control what the "API" returns - mock_response = Mock() - mock_response.json.return_value = { - "results": [{"metadata": {"inciName": ["WATER"]}}] - } - mock_post.return_value = mock_response - - result = cosing_search("WATER", mode="name") - - assert result["inciName"] == ["WATER"] # ← Test your logic, not the API - mock_post.assert_called_once() # Verify it was called -``` - -**Benefits:** -- ✅ Fast (no real network) -- ✅ Reliable (always works) -- ✅ Can test error cases (mock API failures) -- ✅ Isolate your code from external issues - -**What to mock:** -- HTTP requests (`requests.get`, `requests.post`) -- Database calls (`db.find_one`, `db.insert`) -- File I/O (`open`, `read`, `write`) -- External APIs (COSING, ECHA, PubChem) -- Time-dependent functions (`datetime.now()`) - ---- - -### 3. 
**Fixtures - Reusable Test Data** - -**Without fixtures (repetitive):** -```python -def test_clean_basic(): - data = {"inciName": ["WATER"], "casNo": ["7732-18-5"], ...} - result = clean_cosing(data) - assert ... - -def test_clean_empty(): - data = {"inciName": ["WATER"], "casNo": ["7732-18-5"], ...} # Copy-paste! - result = clean_cosing(data) - assert ... -``` - -**With fixtures (DRY - Don't Repeat Yourself):** -```python -# conftest.py -@pytest.fixture -def sample_cosing_response(): - """Reusable COSING response data.""" - return { - "inciName": ["WATER"], - "casNo": ["7732-18-5"], - "substanceId": ["12345"] - } - -# test file -def test_clean_basic(sample_cosing_response): # Auto-injected! - result = clean_cosing(sample_cosing_response) - assert result["inciName"] == "WATER" - -def test_clean_empty(sample_cosing_response): # Reuse same data! - result = clean_cosing(sample_cosing_response) - assert "cosingUrl" in result -``` - -**Benefits:** -- ✅ No code duplication -- ✅ Centralized test data -- ✅ Easy to update (change once, affects all tests) -- ✅ Auto-cleanup (fixtures can tear down resources) - -**Common fixture patterns:** -```python -# Database fixture with cleanup -@pytest.fixture -def test_db(): - db = connect_to_test_db() - yield db # Test runs here - db.drop_all() # Cleanup after test - -# Temporary file fixture -@pytest.fixture -def temp_file(tmp_path): - file_path = tmp_path / "test.json" - file_path.write_text('{"test": "data"}') - return file_path # Auto-cleaned by pytest -``` - ---- - -## Real-World Testing Workflow - -### Scenario: You Add a New Feature - -**Step 1: Write the test FIRST (TDD - Test-Driven Development):** -```python -def test_parse_cas_removes_parentheses(): - """CAS numbers with parentheses should be cleaned.""" - result = parse_cas_numbers(["7732-18-5 (hydrate)"]) - assert result == ["7732-18-5"] -``` - -**Step 2: Run test - it FAILS (expected!):** -```bash -$ uv run pytest 
tests/test_cosing_service.py::test_parse_cas_removes_parentheses - -FAILED: AssertionError: assert ['7732-18-5 (hydrate)'] == ['7732-18-5'] -``` - -**Step 3: Write code to make it pass:** -```python -def parse_cas_numbers(cas_string: list) -> list: - cas_string = cas_string[0] - cas_string = re.sub(r"\([^)]*\)", "", cas_string) # ← Add this - # ... rest of function -``` - -**Step 4: Run test again - it PASSES:** -```bash -$ uv run pytest tests/test_cosing_service.py::test_parse_cas_removes_parentheses - -PASSED ✓ -``` - -**Step 5: Refactor if needed - tests ensure you don't break anything!** - ---- - -### TDD Cycle (Red-Green-Refactor) - -``` -1. RED: Write failing test - ↓ -2. GREEN: Write minimal code to pass - ↓ -3. REFACTOR: Improve code without breaking tests - ↓ - Repeat -``` - -**Benefits:** -- ✅ Forces you to think about requirements first -- ✅ Prevents over-engineering -- ✅ Built-in documentation (tests show intended behavior) -- ✅ Confidence to refactor - ---- - -## Regression Testing - The Killer Feature - -**Scenario: You change code 6 months later:** - -```python -# Original (working) -def parse_cas_numbers(cas_string: list) -> list: - cas_string = cas_string[0] - cas_string = re.sub(r"\([^)]*\)", "", cas_string) - cas_parts = re.split(r"[/;,]", cas_string) # Handles /, ;, , - return [cas.strip() for cas in cas_parts] - -# You "improve" it -def parse_cas_numbers(cas_string: list) -> list: - return cas_string[0].split("/") # Simpler! But... -``` - -**Run tests:** -```bash -$ uv run pytest - -FAILED: test_multiple_cas_with_semicolon -Expected: ['7732-18-5', '56-81-5'] -Got: ['7732-18-5;56-81-5'] # ← Oops, broke semicolon support! - -FAILED: test_cas_with_parentheses -Expected: ['7732-18-5'] -Got: ['7732-18-5 (hydrate)'] # ← Broke parentheses removal! 
-``` - -**Without tests:** -- You deploy -- Users report bugs -- You're confused what broke -- Spend hours debugging - -**With tests:** -- Instant feedback -- Fix before deploying -- Save hours of debugging - ---- - -## Coverage - How Much Is Tested? - -### Running Coverage - -```bash -uv run pytest --cov=src/pif_compiler --cov-report=html -``` - -### Sample Output - -``` -Name Stmts Miss Cover --------------------------------------------------- -cosing_service.py 89 5 94% -echa_service.py 156 89 43% -models.py 45 45 0% --------------------------------------------------- -TOTAL 290 139 52% -``` - -### Interpretation - -- ✅ `cosing_service.py` - **94% covered** (great!) -- ⚠️ `echa_service.py` - **43% covered** (needs more tests) -- ❌ `models.py` - **0% covered** (no tests yet) - -### Coverage Goals - -| Coverage | Status | Action | -|----------|--------|--------| -| 90-100% | ✅ Excellent | Maintain | -| 70-90% | ⚠️ Good | Add edge cases | -| 50-70% | ⚠️ Acceptable | Prioritize critical paths | -| <50% | ❌ Poor | Add tests immediately | - -**Target:** 80%+ for business-critical code - -### HTML Coverage Report - -```bash -uv run pytest --cov=src/pif_compiler --cov-report=html -# Open htmlcov/index.html in browser -``` - -Shows: -- Which lines are tested (green) -- Which lines are not tested (red) -- Which branches are not covered - ---- - -## Best Practices Summary - -### ✅ DO: - -1. **Write tests for all business logic** - ```python - # YES: Test calculations - def test_sed_calculation(): - ingredient = Ingredient(quantity=10.0, dap=0.5) - assert ingredient.calculate_sed() == 5.0 - ``` - -2. **Mock external dependencies** - ```python - # YES: Mock API calls - @patch('cosing_service.req.post') - def test_search(mock_post): - mock_post.return_value.json.return_value = {...} - ``` - -3. 
**Test edge cases** - ```python - # YES: Test edge cases - def test_parse_empty_cas(): - assert parse_cas_numbers([""]) == [] - - def test_parse_invalid_cas(): - with pytest.raises(ValueError): - parse_cas_numbers(["abc-def-ghi"]) - ``` - -4. **Keep tests simple** - ```python - # YES: One test = one thing - def test_cas_removes_whitespace(): - assert parse_cas_numbers([" 123-45-6 "]) == ["123-45-6"] - - # NO: Testing multiple things - def test_cas_everything(): - assert parse_cas_numbers([" 123-45-6 "]) == ["123-45-6"] - assert parse_cas_numbers(["123-45-6/789-01-2"]) == [...] - # Too much in one test! - ``` - -5. **Run tests before committing** - ```bash - git add . - uv run pytest # ← Always run first! - git commit -m "Add feature X" - ``` - -6. **Use descriptive test names** - ```python - # YES: Describes what it tests - def test_parse_cas_removes_parenthetical_info(): - ... - - # NO: Vague - def test_cas_1(): - ... - ``` - ---- - -### ❌ DON'T: - -1. **Don't test external libraries** - ```python - # NO: Testing if requests.post works - def test_requests_library(): - response = requests.post("https://example.com") - assert response.status_code == 200 - - # YES: Test YOUR code that uses requests - @patch('requests.post') - def test_my_search_function(mock_post): - ... - ``` - -2. **Don't make tests dependent on each other** - ```python - # NO: test_b depends on test_a - def test_a_creates_data(): - db.insert({"id": 1, "name": "test"}) - - def test_b_uses_data(): - data = db.find_one({"id": 1}) # Breaks if test_a fails! - - # YES: Each test is independent - def test_b_uses_data(): - db.insert({"id": 1, "name": "test"}) # Create own data - data = db.find_one({"id": 1}) - ``` - -3. 
**Don't test implementation details** - ```python - # NO: Testing internal variable names - def test_internal_state(): - obj = MyClass() - assert obj._internal_var == "value" # Breaks with refactoring - - # YES: Test public behavior - def test_public_api(): - obj = MyClass() - assert obj.get_value() == "value" - ``` - -4. **Don't skip tests** - ```python - # NO: Commenting out failing tests - # def test_broken_feature(): - # assert broken_function() == "expected" - - # YES: Fix the test or mark as TODO - @pytest.mark.skip(reason="Feature not implemented yet") - def test_future_feature(): - ... - ``` - ---- - -## Practical Example: Your Workflow - -### Before (Manual Script) - -```python -# test_water.py -from cosing_service import cosing_search, clean_cosing - -result = cosing_search("WATER", "name") -print(result) # ← You manually check - -clean = clean_cosing(result) -print(clean) # ← You manually check again - -# Run 10 times with different inputs... tedious! -``` - -**Problems:** -- Manual verification -- Slow (type command, read output, verify) -- Error-prone (miss things) -- Not repeatable - ---- - -### After (Automated Tests) - -```python -# tests/test_cosing_service.py -def test_search_and_clean_water(): - """Water should be searchable and cleanable.""" - result = cosing_search("WATER", "name") - assert result is not None - assert "inciName" in result - - clean = clean_cosing(result) - assert clean["inciName"] == "WATER" - assert "cosingUrl" in clean - -# Run ONCE: pytest -# It checks everything automatically! -``` - -**Run all 25 tests:** -```bash -$ uv run pytest - -tests/test_cosing_service.py::TestParseCasNumbers::test_single_cas_number PASSED -tests/test_cosing_service.py::TestParseCasNumbers::test_multiple_cas_with_slash PASSED -... -======================== 25 passed in 0.5s ======================== -``` - -**Benefits:** -- ✅ All pass? Safe to deploy! -- ❌ One fails? Fix before deploying! -- ⏱️ 25 tests in 0.5 seconds vs. 
manual testing for 30 minutes - ---- - -## When Should You Write Tests? - -### Always Test: - -✅ **Business logic** (calculations, data processing) -```python -# YES -def test_calculate_sed(): - assert calculate_sed(quantity=10, dap=0.5) == 5.0 -``` - -✅ **Data validation** (Pydantic models) -```python -# YES -def test_ingredient_validates_cas_format(): - with pytest.raises(ValidationError): - Ingredient(cas="invalid", quantity=10.0) -``` - -✅ **API integrations** (with mocks) -```python -# YES -@patch('requests.post') -def test_cosing_search(mock_post): - ... -``` - -✅ **Bug fixes** (write test first, then fix) -```python -# YES -def test_bug_123_empty_cas_crash(): - """Regression test for bug #123.""" - result = parse_cas_numbers([]) # Used to crash - assert result == [] -``` - ---- - -### Sometimes Test: - -⚠️ **UI code** (harder to test, less critical) -```python -# Streamlit UI tests are complex, lower priority -``` - -⚠️ **Configuration** (usually simple) -```python -# Config loading is straightforward, test if complex logic -``` - ---- - -### Don't Test: - -❌ **Third-party libraries** (they have their own tests) -```python -# NO: Testing if pandas works -def test_pandas_dataframe(): - df = pd.DataFrame({"a": [1, 2, 3]}) - assert len(df) == 3 # Pandas team already tested this! -``` - -❌ **Trivial code** -```python -# NO: Testing simple getters/setters -class MyClass: - def get_name(self): - return self.name # Too simple to test -``` - ---- - -## Your Next Steps - -### 1. Install Pytest - -```bash -cd c:\Users\adish\Projects\pif_compiler -uv add --dev pytest pytest-cov pytest-mock -``` - -### 2. Run the COSING Tests - -```bash -# Run all tests -uv run pytest - -# Run with verbose output -uv run pytest -v - -# Run specific test file -uv run pytest tests/test_cosing_service.py - -# Run specific test -uv run pytest tests/test_cosing_service.py::TestParseCasNumbers::test_single_cas_number -``` - -### 3. 
See Coverage - -```bash -# Terminal report -uv run pytest --cov=src/pif_compiler/services/cosing_service - -# HTML report (more detailed) -uv run pytest --cov=src/pif_compiler --cov-report=html -# Open htmlcov/index.html in browser -``` - -### 4. Start Writing Tests for New Code - -Follow the TDD cycle: -1. **Red**: Write failing test -2. **Green**: Write minimal code to pass -3. **Refactor**: Improve code -4. Repeat! - ---- - -## Additional Resources - -### Pytest Documentation -- [Official Pytest Docs](https://docs.pytest.org/) -- [Pytest Fixtures](https://docs.pytest.org/en/stable/fixture.html) -- [Pytest Mocking](https://docs.pytest.org/en/stable/monkeypatch.html) - -### Testing Philosophy -- [Test-Driven Development (TDD)](https://www.freecodecamp.org/news/test-driven-development-what-it-is-and-what-it-is-not-41fa6bca02a2/) -- [Testing Best Practices](https://testautomationuniversity.com/) -- [The Testing Pyramid](https://martinfowler.com/articles/practical-test-pyramid.html) - -### PIF Compiler Specific -- [tests/README.md](../tests/README.md) - Test suite documentation -- [tests/RUN_TESTS.md](../tests/RUN_TESTS.md) - Quick start guide -- [REFACTORING.md](../REFACTORING.md) - Code organization changes - ---- - -## Summary - -**Testing transforms your development workflow:** - -| Without Tests | With Tests | -|---------------|------------| -| Manual verification | Automated checks | -| Slow feedback | Instant feedback | -| Fear of breaking things | Confidence to refactor | -| Undocumented behavior | Tests as documentation | -| Debug for hours | Pinpoint issues immediately | - -**Start small:** -1. Write tests for one service (✅ COSING done!) -2. Add tests for new features -3. Fix bugs with tests first -4. 
Gradually increase coverage - -**The investment pays off:** -- Fewer bugs in production -- Faster development (less debugging) -- Better code design -- Easier collaboration -- Peace of mind 😌 - ---- - -*Last updated: 2025-01-04* diff --git a/docs/user_journey.md b/docs/user_journey.md deleted file mode 100644 index 72b21ee..0000000 --- a/docs/user_journey.md +++ /dev/null @@ -1,6 +0,0 @@ -# User Journey - -1) User login or signs up - - For this function we will use the internal component of streamlit to handle authentication, and behind i will have a supabase db (work-in progress) -2) Open recent or create a new project: - - This is where we open an existing file of project with all the specifics or we create a new one \ No newline at end of file diff --git a/main.db b/main.db deleted file mode 100644 index 88ec05a2717ff0b85415f32f058b8d67d4e1929f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI#u?fOZ5CG6Gh`5A}Z6F#VhExjL33k?^?qKB#LYA;|4>xcS7tkUE!4R-Yns?0| zcLxWryTRx&iJtdjy<2UT;lDU1@hnc7caLd4J!Gden>X*65FkK+009C72oNAZfB*pk z?Frmo*UNq}-c}diKT-8V=$GZNOug@*R{8&HBpL(=5FkK+009C72oNAZpi=?=f2aJ8 V6Cgl<009C72oNAZfB=D31U|{RHAesd diff --git a/main.db.wal b/main.db.wal deleted file mode 100644 index b7189957e92aee9fabf4f8cac275dd125061b37b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10338 zcmeHNZA=_R7@mcbBlgId1okZI0XHZXtXW zTT){U8m$EtYpbd4pDHR*V^sl_fI zRD?!RaYAYJ2P3Ky)zxt%9Y@jxGR!Y@0!jGuHT0S@b3O69f4~138Oeq*LJz+F*ZKGR z&yV!VNFqS+I~|ahf{Y;<<;kcdsCa2zP3nX4M{9O?UDzz;pfIOpKo8**&S(OmULe#w zVPkW@1Do&K*c4ZYxf3~+(TYf$7S{ZMSS+Fj16`^X6`ydal89-*mD&=Bs{!GD%gE`3 z64at{+5tW?AR35@Yb{N*#gw=jR3W}vs_~^^#i`ixh2lzUn-c9JsNAWFR=v|N2h>QX zKcT~&>Ee(TsUdMC642FlaZFKM)#3W)x~o|9N^;*xj9?%V)ZiN86>eolnJ|9m;hvb1 zAUtrZDbNG)u%g?z#ShIPTVTSu4sW5?jn zYbMuCyKq~Cxz*w_sPz0)xr*ZSqPn$jh7Q+&;3-MD464P%Oqn%Vj5>J4YtpKu}H zoVWzXWJ{;Pth>-RyB~I-hI;dJ_%k+TDG> zec!&R*+rzwu}CL$FaZma>I)R@oo{~mpnmlaTV1w935!I9r4SMvK5Qm`)+INjkNYM* 
z?u9kr3?^nau;e7$T40P_Hw>0@1X66>mPtA-Dbm&v4U~rW1}a*w3_pBEMwLWfp*R6$ zH%p1*R`!}e=uXMIS1L%R$5$m*`S)>u^-Gd806(uu7FfOrZ^jhCWH)+XTQOD%yc6>m44<$z8WjK1N%*m* z*@%Lvwj*RZyL;#*pyq~K!pLS2!(4$@GMnE8bbh`y-wH{%07nj)%*8XzMR5CGG8arz zPJ<9yz8Z8rmAt;wv$r*Fo&jx7)jd>j4C53Ps@dAP8%`ZvRhJVgR%{kqsE8R!cIM9I zB^4Xwopk2NUQ8*llaLi=5;EA*9&>LOSOUR_Sx(w;n>MSB!FjNYhwgDQ3}z!h^a>2` zP9BU@%VEi^UkimWR71A`MH>CaRE zMqaUTnr%aq<5jpp8@IM?X{s{`P=_|$e0oT4(DeOb*S^X_xAz8I80I%K=6J#HNT5BL zj@0LDq~@AP%chCMs7dOznl0STY>-Wy-%Lezw&$yV!{7aMBkNDO6kr&+z7To8%z2Vr zUx>a{79D~ag_%7GaonaSS)#sBuH0J6zEG~*vf81!P7>2MXN{()9yZICsgfcR!dFtu Wv74P$!JLz}oV1bunfo6#$^Qeah#c7f diff --git a/marimo/database_creation.py b/marimo/database_creation.py deleted file mode 100644 index 4b8797f..0000000 --- a/marimo/database_creation.py +++ /dev/null @@ -1,158 +0,0 @@ -import marimo - -__generated_with = "0.16.5" -app = marimo.App(width="medium") - - -@app.cell -def _(): - import marimo as mo - import duckdb - return duckdb, mo - - -@app.cell -def _(duckdb): - con = duckdb.connect('main.db') - return (con,) - - -@app.cell -def _(con, mo): - _df = mo.sql( - f""" - --CREATE SEQUENCE seq_clienti START 1; - --CREATE SEQUENCE seq_compilatori START 1; - --CREATE SEQUENCE seq_tipi_prodotti START 1; - --CREATE SEQUENCE seq_stati_ordini START 1; - --CREATE SEQUENCE seq_ordini START 1; - """, - engine=con - ) - return - - -@app.cell -def _(con, mo): - _df = mo.sql( - f""" - CREATE OR REPLACE TABLE clienti ( - nome_cliente VARCHAR UNIQUE, - id_cliente INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq_clienti') - ) - """, - engine=con - ) - return - - -@app.cell -def _(con, mo): - _df = mo.sql( - f""" - CREATE OR REPLACE TABLE compilatori ( - nome_compilatore VARCHAR UNIQUE, - id_compilatore INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq_compilatori') - ) - """, - engine=con - ) - return - - -@app.cell -def _(con, mo): - _df = mo.sql( - f""" - - CREATE OR REPLACE TABLE tipi_prodotti ( - nome_tipo VARCHAR UNIQUE, - id_tipo INTEGER PRIMARY KEY DEFAULT 
NEXTVAL('seq_tipi_prodotti'), - luogo_applicazione VARCHAR, - espo_primaria VARCHAR, - espo_secondaria VARCHAR, - espo_nano VARCHAR, - supericie_cm2 INT, - frequenza INT, - qty_daily_stimata INT, - qty_daily_relativa INT, - ritenzione FLOAT, - espo_daily_calcolata INT, - espo_daily_relativa_calcolata INT, - peso INT, - target VARCHAR - ) - """, - engine=con - ) - return - - -@app.cell -def _(con, mo): - _df = mo.sql( - f""" - - CREATE OR REPLACE TABLE stati_ordini ( - id_stato INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq_stati_ordini'), - nome_stato VARCHAR - ) - """, - engine=con - ) - return (stati_ordini,) - - -@app.cell -def _(con, mo): - _df = mo.sql( - f""" - - CREATE OR REPLACE TABLE ordini ( - id_ordine INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq_ordini'), - id_cliente INTEGER, - id_compilatore INTEGER, - id_tipo_prodotto INTEGER, - uuid_ordine VARCHAR NOT NULL, - uuid_progetto VARCHAR, - data_ordine DATETIME NOT NULL, - stato_ordine INTEGER DEFAULT 0, - note VARCHAR, - FOREIGN KEY (id_cliente) REFERENCES clienti(id_cliente), - FOREIGN KEY (id_compilatore) REFERENCES compilatori(id_compilatore), - FOREIGN KEY (id_tipo_prodotto) REFERENCES tipi_prodotti(id_tipo), - FOREIGN KEY (stato_ordine) REFERENCES stati_ordini(id_stato) - ) - """, - engine=con - ) - return - - -@app.cell -def _(con, mo): - _df = mo.sql( - f""" - - """, - engine=con - ) - return - - -@app.cell -def _(con, mo, stati_ordini): - _df = mo.sql( - f""" - INSERT INTO stati_ordini (nome_stato) VALUES ( - 'Ordine registrato', - '' - ) - """, - engine=con - ) - return - - -if __name__ == "__main__": - app.run() diff --git a/marimo/parsing_echa.py b/marimo/parsing_echa.py new file mode 100644 index 0000000..8743739 --- /dev/null +++ b/marimo/parsing_echa.py @@ -0,0 +1,194 @@ +import marimo + +__generated_with = "0.16.5" +app = marimo.App(width="medium") + + +@app.cell +def _(): + import marimo as mo + return + + +@app.cell +def _(): + from pif_compiler.services.srv_echa import orchestrator + return 
(orchestrator,) + + +@app.cell +def _(orchestrator): + result = orchestrator("57-55-6") + return (result,) + + +@app.cell +def _(result): + test = result['repeated_dose_toxicity'] + return (test,) + + +@app.cell +def _(result): + acute = result['acute_toxicity'] + acute + return (acute,) + + +@app.cell +def _(): + from pif_compiler.services.srv_echa import extract_levels, at_extractor, rdt_extractor + return at_extractor, extract_levels, rdt_extractor + + +@app.cell +def _(acute, at_extractor, extract_levels): + at = extract_levels(acute, at_extractor) + return (at,) + + +@app.cell +def _(at): + at + return + + +@app.cell +def _(extract_levels, rdt_extractor, test): + rdt = extract_levels(test, rdt_extractor) + return (rdt,) + + +@app.cell +def _(rdt): + rdt + return + + +@app.cell +def _(): + from pydantic import BaseModel + from typing import Optional + return BaseModel, Optional + + +@app.cell +def _(BaseModel, Optional): + class ToxIndicator(BaseModel): + indicator : str + value : int + unit : str + route : str + toxicity_type : Optional[str] = None + ref : Optional[str] = None + + @property + def priority_rank(self): + """Returns the numerical priority based on the toxicological indicator.""" + mapping = { + 'LD50': 1, + 'DL50': 1, + 'NOAEC': 2, + 'LOAEL': 3, + 'NOAEL': 4 + } + return mapping.get(self.indicator, 99) + + @property + def factor(self): + """Returns the factor based on the toxicity type.""" + if self.priority_rank == 1: + return 10 + elif self.priority_rank == 3: + return 3 + return 1 + return (ToxIndicator,) + + +@app.cell +def _(ToxIndicator, rdt): + lista = [] + for i in rdt: + tesss = rdt.get(i) + t = ToxIndicator(**tesss) + lista.append(t) + + lista + return + + +@app.cell +def _(): + from pydantic import model_validator + return (model_validator,) + + +@app.cell +def _( + BaseModel, + Optional, + ToxIndicator, + at_extractor, + extract_levels, + model_validator, + rdt_extractor, +): + class Toxicity(BaseModel): + cas: str + indicators: 
list[ToxIndicator] + best_case: Optional[ToxIndicator] = None + factor: Optional[int] = None + + @model_validator(mode='after') + def set_best_case(self) -> 'Toxicity': + if self.indicators: + self.best_case = max(self.indicators, key=lambda x: x.priority_rank) + self.factor = self.best_case.factor + return self + + @classmethod + def from_result(cls, cas: str, result): + toxicity_types = ['repeated_dose_toxicity', 'acute_toxicity'] + indicators_list = [] + + for tt in toxicity_types: + if tt not in result: + continue + + try: + extractor = at_extractor if tt == 'acute_toxicity' else rdt_extractor + fetch = extract_levels(result[tt], extractor=extractor) + + link = result.get(f"{tt}_link", "") + + for key, lvl in fetch.items(): + lvl['ref'] = link + elem = ToxIndicator(**lvl) + indicators_list.append(elem) + + except Exception as e: + print(f"Errore durante l'estrazione di {tt}: {e}") + continue + + return cls( + cas=cas, + indicators=indicators_list + ) + return (Toxicity,) + + +@app.cell +def _(Toxicity, result): + tox = Toxicity.from_result("57-55-6", result) + tox + return + + +@app.cell +def _(result): + result + return + + +if __name__ == "__main__": + app.run() diff --git a/marimo/test_obj.py b/marimo/test_obj.py new file mode 100644 index 0000000..3296ce6 --- /dev/null +++ b/marimo/test_obj.py @@ -0,0 +1,49 @@ +import marimo + +__generated_with = "0.16.5" +app = marimo.App(width="medium") + + +@app.cell +def _(): + from pif_compiler.classes.models import Esposition + return (Esposition,) + + +@app.cell +def _(Esposition): + it = Esposition( + preset_name="Test xzzx str: + return v.upper() + """, + name="_" +) + + +@app.cell +def _(CosmeticIngredient, collection, mo): + mo.stop(True) + try: + ingredient = CosmeticIngredient( + inci_name="Glycerin", + cas="56-81-5", + percentage=5.5 + ) + print(f"✅ Object Created: {ingredient}") + except ValueError as e: + print(f"❌ Validation Error: {e}") + + document_to_insert = ingredient.model_dump() + + result = 
collection.insert_one(document_to_insert) + return + + +@app.cell +def _( + BaseModel, + ConfigDict, + CosingInfo, + Field, + List, + Optional, + mo, + model_validator, + re, +): + mo.stop(True) + + + class DapInfo(BaseModel): + """Informazioni dal Dossier (es. origine, purezza)""" + origin: Optional[str] = None # es. "Synthetic", "Vegetable" + purity_percentage: Optional[float] = None + supplier_code: Optional[str] = None + + class ToxInfo(BaseModel): + """Dati Tossicologici""" + noael: Optional[float] = None # No Observed Adverse Effect Level + ld50: Optional[float] = None # Lethal Dose 50 + sed: Optional[float] = None # Systemic Exposure Dosage + mos: Optional[float] = None # Margin of Safety + + # --- 2. Modello Principale --- + + class CosmeticIngredient(BaseModel): + model_config = ConfigDict(validate_assignment=True) # Valida anche se modifichi i campi dopo + + # Gestione INCI multipli per lo stesso CAS + inci_names: List[str] = Field(default_factory=list) + + # Il CAS è una stringa obbligatoria, ma il regex dipende dal contesto + cas: str + + colorant: bool = Field(default=False) + organic: bool = Field(default=False) + + # Sotto-oggetti opzionali + dap: Optional[DapInfo] = None + cosing: Optional[CosingInfo] = None + tox_levels: Optional[ToxInfo] = None + + # --- VALIDAZIONE CONDIZIONALE CAS --- + @model_validator(mode='after') + def validate_cas_logic(self): + cas_value = self.cas + is_exempt = self.colorant or self.organic + + if not cas_value or not cas_value.strip(): + raise ValueError("Il campo CAS non può essere vuoto.") + + if not is_exempt: + cas_regex = r'^\d{2,7}-\d{2}-\d$' + if not re.match(cas_regex, cas_value): + raise ValueError(f"Formato CAS non valido ('{cas_value}') per ingrediente standard.") + + # Se è colorante/organico, accettiamo qualsiasi stringa (es. 
'CI 77891' o codici interni) + return self + + # --- METODO HELPER PER AGGIUNGERE INCI --- + def add_inci(self, new_inci: str): + """Aggiunge un INCI alla lista solo se non è già presente (case insensitive).""" + new_inci_upper = new_inci.upper() + # Controlliamo se esiste già (normalizzando a maiuscolo per sicurezza) + if not any(existing.upper() == new_inci_upper for existing in self.inci_names): + self.inci_names.append(new_inci_upper) + print(f"✅ INCI '{new_inci_upper}' aggiunto.") + else: + print(f"ℹ️ INCI '{new_inci_upper}' già presente.") + return CosmeticIngredient, DapInfo + + +@app.cell +def _(): + from pif_compiler.services.srv_pubchem import pubchem_dap + + dato = pubchem_dap("56-81-5") + dato + return (dato,) + + +app._unparsable_cell( + r""" + molecular_weight = dato.get(\"MolecularWeight\") + log pow = dato.get(\"XLogP\") + topological_polar_surface_area = dato.get(\"TPSA\") + melting_point = dato.get(\"MeltingPoint\") + ionization = dato.get(\"Dissociation Constants\") + """, + name="_" +) + + +@app.cell(hide_code=True) +def _(mo): + mo.md( + r""" + Molecolar Weight >500 Da + High degree of ionisation + Log Pow ≤-1 or ≥ 4 + Topological polar surface area >120 Å2 + Melting point > 200°C + """ + ) + return + + +@app.cell +def _(BaseModel, Field, Optional, model_validator): + class DapInfo(BaseModel): + cas: str + + molecular_weight: Optional[float] = Field(default=None, description="In Daltons (Da)") + high_ionization: Optional[float] = Field(default=None, description="High degree of ionization") + log_pow: Optional[float] = Field(default=None, description="Partition coefficient") + tpsa: Optional[float] = Field(default=None, description="Topological polar surface area") + melting_point: Optional[float] = Field(default=None, description="In Celsius (°C)") + + # --- Il valore DAP Calcolato --- + # Lo impostiamo di default a 0.5 (50%), verrà sovrascritto dal validator + dap_value: float = 0.5 + + @model_validator(mode='after') + def compute_dap(self): + 
# Lista delle condizioni (True se la condizione riduce l'assorbimento) + conditions = [] + + # 1. MW > 500 Da + if self.molecular_weight is not None: + conditions.append(self.molecular_weight > 500) + + # 2. High Ionization (Se è True, riduce l'assorbimento) + if self.high_ionization is not None: + conditions.append(self.high_ionization is True) + + # 3. Log Pow <= -1 OR >= 4 + if self.log_pow is not None: + conditions.append(self.log_pow <= -1 or self.log_pow >= 4) + + # 4. TPSA > 120 Å2 + if self.tpsa is not None: + conditions.append(self.tpsa > 120) + + # 5. Melting Point > 200°C + if self.melting_point is not None: + conditions.append(self.melting_point > 200) + + # LOGICA FINALE: + # Se c'è almeno una condizione "sicura" (True), il DAP è 0.1 + if any(conditions): + self.dap_value = 0.1 + else: + self.dap_value = 0.5 + + return self + return (DapInfo,) + + +@app.cell +def _(DapInfo, dato, re): + desiderated_keys = ['CAS', 'MolecularWeight', 'XLogP', 'TPSA', 'Melting Point', 'Dissociation Constants'] + actual_keys = [key for key in dato.keys() if key in desiderated_keys] + + dict = {} + + for key in actual_keys: + if key == 'CAS': + dict['cas'] = dato[key] + if key == 'MolecularWeight': + mw = float(dato[key]) + dict['molecular_weight'] = mw + if key == 'XLogP': + log_pow = float(dato[key]) + dict['log_pow'] = log_pow + if key == 'TPSA': + tpsa = float(dato[key]) + dict['tpsa'] = tpsa + if key == 'Melting Point': + try: + for item in dato[key]: + if '°C' in item['Value']: + mp = dato[key]['Value'] + mp_value = re.findall(r"[-+]?\d*\.\d+|\d+", mp) + if mp_value: + dict['melting_point'] = float(mp_value[0]) + except: + continue + if key == 'Dissociation Constants': + try: + for item in dato[key]: + if 'pKa' in item['Value']: + pk = dato[key]['Value'] + pk_value = re.findall(r"[-+]?\d*\.\d+|\d+", mp) + if pk_value: + dict['high_ionization'] = float(mp_value[0]) + except: + continue + + dap_info = DapInfo(**dict) + dap_info + return + + +@app.cell +def _(): + from 
pif_compiler.services.srv_cosing import cosing_search, parse_cas_numbers, clean_cosing, identified_ingredients + return clean_cosing, cosing_search + + +@app.cell +def _(clean_cosing, cosing_search): + raw_cosing = cosing_search("72-48-0", 'cas') + cleaned_cosing = clean_cosing(raw_cosing) + cleaned_cosing + return cleaned_cosing, raw_cosing + + +@app.cell +def _(mo): + mo.md( + r""" + otherRestrictions + refNo + annexNo + casNo + functionName + """ + ) + return + + +@app.cell +def _(BaseModel, Field, List, Optional): + class CosingInfo(BaseModel): + cas : List[str] = Field(default_factory=list) + common_names : List[str] = Field(default_factory=list) + inci : List[str] = Field(default_factory=list) + annex : List[str] = Field(default_factory=list) + functionName : List[str] = Field(default_factory=list) + otherRestrictions : List[str] = Field(default_factory=list) + cosmeticRestriction : Optional[str] + return (CosingInfo,) + + +@app.cell +def _(CosingInfo): + def cosing_builder(cleaned_cosing): + cosing_keys = ['nameOfCommonIngredientsGlossary', 'casNo', 'functionName', 'annexNo', 'refNo', 'otherRestrictions', 'cosmeticRestriction', 'inciName'] + keys = [k for k in cleaned_cosing.keys() if k in cosing_keys] + + cosing_dict = {} + + for k in keys: + if k == 'nameOfCommonIngredientsGlossary': + names = [] + for name in cleaned_cosing[k]: + names.append(name) + cosing_dict['common_names'] = names + if k == 'inciName': + inci = [] + for inc in cleaned_cosing[k]: + inci.append(inc) + cosing_dict['inci'] = names + if k == 'casNo': + cas_list = [] + for casNo in cleaned_cosing[k]: + cas_list.append(casNo) + cosing_dict['cas'] = cas_list + if k == 'functionName': + functions = [] + for func in cleaned_cosing[k]: + functions.append(func) + cosing_dict['functionName'] = functions + if k == 'annexNo': + annexes = [] + i = 0 + for ann in cleaned_cosing[k]: + restriction = ann + ' / ' + cleaned_cosing['refNo'][i] + annexes.append(restriction) + i = i+1 + cosing_dict['annex'] 
= annexes + if k == 'otherRestrictions': + other_restrictions = [] + for ores in cleaned_cosing[k]: + other_restrictions.append(ores) + cosing_dict['otherRestrictions'] = other_restrictions + if k == 'cosmeticRestriction': + cosing_dict['cosmeticRestriction'] = cleaned_cosing[k] + + test_cosing = CosingInfo( + **cosing_dict + ) + return test_cosing + return (cosing_builder,) + + +@app.cell +def _(cleaned_cosing, cosing_builder): + id = cleaned_cosing['identifiedIngredient'] + if id: + for e in id: + obj = cosing_builder(e) + obj + return + + +@app.cell +def _(raw_cosing): + raw_cosing + return + + +@app.cell +def _(): + return + + +if __name__ == "__main__": + app.run() diff --git a/old/_old/echaFind.py b/old/_old/echaFind.py deleted file mode 100644 index 3feea79..0000000 --- a/old/_old/echaFind.py +++ /dev/null @@ -1,245 +0,0 @@ -import requests -import urllib.parse -import re as standardre -import logging -import json -from bs4 import BeautifulSoup - - -# Settings per il logging -logging.basicConfig( - format="{asctime} - {levelname} - {message}", - style="{", - datefmt="%Y-%m-%d %H:%M", - filename="echa.log", - encoding="utf-8", - filemode="a", - level=logging.INFO, -) - -# Funzione inutile -def getCas(substance, ): - results = {} - req_0 = requests.get( - "https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText=" - + urllib.parse.quote(substance) - ) - req_0_json = req_0.json() - try: - rmlId = req_0_json["items"][0]["substanceIndex"]["rmlId"] - rmlName = req_0_json["items"][0]["substanceIndex"]["rmlName"] - rmlCas = req_0_json["items"][0]["substanceIndex"]["rmlCas"] - - results["rmlId"] = rmlId - results["rmlName"] = rmlName - results["rmlCas"] = rmlCas - except: - return False - return results - - - - -# Funzione per cercare il dossier dato in input un CAS, una sostanza o un EN -def search_dossier(substance, input_type='rmlCas'): - results = {} - # Il dizionario che farò tornare alla fine - - # Prima parte. 
Ottengo rmlID e rmlName - # st.code('https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText='+substance) - req_0 = requests.get( - "https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText=" - + urllib.parse.quote(substance) - ) - - logging.info(f'echaFind.search_dossier(). searching "{substance}"') - - #'La prima cosa da fare è fare una ricerca con il nome della sostanza ma trasformata attraverso urllib' - req_0_json = req_0.json() - try: - # Estraggo i campi che mi servono dalla response - rmlId = req_0_json["items"][0]["substanceIndex"]["rmlId"] - rmlName = req_0_json["items"][0]["substanceIndex"]["rmlName"] - rmlCas = req_0_json["items"][0]["substanceIndex"]["rmlCas"] - rmlEc = req_0_json["items"][0]["substanceIndex"]["rmlEc"] - - results['search_response'] = f"https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText={urllib.parse.quote(substance)}" - results["rmlId"] = rmlId - results["rmlName"] = rmlName - results["rmlCas"] = rmlCas - results["rmlEc"] = rmlEc - - logging.info( - f"echaFind.search_dossier(). found substance on ECHA. rmlId: '{rmlId}', rmlName: '{rmlName}', rmlCas: '{rmlCas}'" - ) - except: - logging.info( - f"echaFind.search_dossier(). could not find substance for '{substance}'" - ) - return False - - # Update: in certi casi poteva verificarsi che inserendo un CAS si trovasse invece una sostanza con codice EN uguale al CAS in input. - # Ora controllo che la sostanza trovata abbia effettivamente un CAS uguale a quello inserito in input. - # è inoltre possibile cercare per rmlName (nome della sostanza) o EN (rmlEn): basta specificare in input_type per cosa si sta cercando - if results[input_type] != substance: - logging.error(f'echa.echaFind.search_dossier(): results[{input_type}] "{results[input_type]}is not equal to "{substance}". ') - return f'search_error. results[{input_type}] ("{results[input_type]}") is not equal to "{substance}". 
Maybe you specified the wrong input_type. Check the results here: https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText={urllib.parse.quote(substance)}' - - # Seconda parte. Cerco sul sito ECHA dei dossiers creando un link con l'ID precedentemente ottenuto. - req_1_url = ( - "https://chem.echa.europa.eu/api-dossier-list/v1/dossier?pageIndex=1&pageSize=100&rmlId=" - + rmlId - + "®istrationStatuses=Active" - ) # Prima cerco negli active. - - req_1 = requests.get(req_1_url) - req_1_json = req_1.json() - - # Se non esistono dossiers attivi cerco quelli inattivi - if req_1_json["items"] == []: - logging.info( - f"echaFind.search_dossier(). could not find active dossier for '{substance}'. Proceeding to search in the unactive ones." - ) - req_1_url = ( - "https://chem.echa.europa.eu/api-dossier-list/v1/dossier?pageIndex=1&pageSize=100&rmlId=" - + rmlId - + "®istrationStatuses=Inactive" - ) - req_1 = requests.get(req_1_url) - req_1_json = req_1.json() - if req_1_json["items"] == []: - logging.info( - f"echaFind.search_dossier(). could not find unactive dossiers for '{substance}'" - ) # Non ho trovato nè dossiers inattivi che attivi - return False - else: - logging.info( - f"echaFind.search_dossier(). found unactive dossiers for '{rmlName}'" - ) - results["dossierType"] = "Inactive" - - else: - logging.info( - f"echaFind.search_dossier(). found active dossiers for '{substance}'" - ) - results["dossierType"] = "Active" - - # Queste erano le due robe che mi servivano - assetExternalId = req_1_json["items"][0]["assetExternalId"] - - # UPDATE: Per ottenere la data dell'ultima modifica - try: - lastUpdateDate = req_1_json["items"][0]["lastUpdatedDate"] - datetime_object = datetime.fromisoformat(lastUpdateDate.replace('Z', '+00:00')) # Handle 'Z' if present, else it might break on older python versions - lastUpdateDate = datetime_object.date().isoformat() - results['lastUpdateDate'] = lastUpdateDate - except: - logging.error(f"echa.echaFind(). 
Could not find lastUpdateDate for the dossier") - - rootKey = req_1_json["items"][0]["rootKey"] - - # Terza parte. Ottengo assetExternalId - # "Con l'assetExternalId è possibile arrivare alla pagina principale del dossier." - # "Da questa pagina bisogna scrappare l'ID del riassunto tossicologico, :red[**SE ESISTE**]" - results["index"] = ( - "https://chem.echa.europa.eu/html-pages/" + assetExternalId + "/index.html" - ) - results["index_js"] = ( - f"https://chem.echa.europa.eu/{rmlId}/dossier-view/{assetExternalId}" - ) - - req_2 = requests.get( - "https://chem.echa.europa.eu/html-pages/" + assetExternalId + "/index.html" - ) - index = BeautifulSoup(req_2.text, "html.parser") - index.prettify() - - # Quarta parte. Ottengo l'ID del riassunto tossicologico dall'index.html - # "In tutto quell'HTML ci interessa solo un div. BeautifulSoup ha problemi se ci sono troppi div innestati. Quindi uso una combinazione di quello e di Regex" - - div = index.find_all("div", id=["id_7_Toxicologicalinformation"]) - str_div = str(div) - str_div = str_div.split("")[0] - - uic_found = False - if type(standardre.search('href="([^"]+)"', str_div)).__name__ == "NoneType": - # Un regex per trovare l'href che mi serve - logging.info( - f"echaFind.search_dossier(). Could not find 'id_7_Toxicologicalinformation' in the body" - ) - else: - UIC = standardre.search('href="([^"]+)"', str_div).group(1) - uic_found = True - - # Per l'acute toxicity - acute_toxicity_found = False - div_acute_toxicity = index.find_all("div", id=["id_72_AcuteToxicity"]) - if div_acute_toxicity: - for div in div_acute_toxicity: - try: - a = div.find_all("a", href=True)[0] - acute_toxicity_id = standardre.search('href="([^"]+)"', str(a)).group(1) - acute_toxicity_found = True - except: - logging.info( - f"echaFind.search_dossier(). 
No acute_toxicity_id found from index for {substance}" - ) - - # Per il repeated dose - repeated_dose_found = False - div_repeated_dose = index.find_all("div", id=["id_75_Repeateddosetoxicity"]) - if div_repeated_dose: - for div in div_repeated_dose: - try: - a = div.find_all("a", href=True)[0] - repeated_dose_id = standardre.search('href="([^"]+)"', str(a)).group(1) - repeated_dose_found = True - except: - logging.info( - f"echaFind.search_dossier(). No repeated_dose_id found from index for {substance}" - ) - - # Quinta parte. Recupero l'html del dossier tossicologico e faccio ritornare il content - - if acute_toxicity_found: - acute_toxicity_link = ( - "https://chem.echa.europa.eu/html-pages/" - + assetExternalId - + "/documents/" - + acute_toxicity_id - + ".html" - ) - results["AcuteToxicity"] = acute_toxicity_link - results["AcuteToxicity_js"] = ( - f"https://chem.echa.europa.eu/{rmlId}/dossier-view/{assetExternalId}/{acute_toxicity_id}" - ) - - if uic_found: - # UIC è l'id del toxsummary - final_url = ( - "https://chem.echa.europa.eu/html-pages/" - + assetExternalId - + "/documents/" - + UIC - + ".html" - ) - results["ToxSummary"] = final_url - results["ToxSummary_js"] = ( - f"https://chem.echa.europa.eu/{rmlId}/dossier-view/{assetExternalId}/{UIC}" - ) - - if repeated_dose_found: - results["RepeatedDose"] = ( - "https://chem.echa.europa.eu/html-pages/" - + assetExternalId - + "/documents/" - + repeated_dose_id - + ".html" - ) - results["RepeatedDose_js"] = ( - f"https://chem.echa.europa.eu/{rmlId}/dossier-view/{assetExternalId}/{repeated_dose_id}" - ) - - json_formatted_str = json.dumps(results) - logging.info(f"echaFind.search_dossier() OK. 
output: {json_formatted_str}") - return results diff --git a/old/_old/echaProcess.py b/old/_old/echaProcess.py deleted file mode 100644 index fd4783f..0000000 --- a/old/_old/echaProcess.py +++ /dev/null @@ -1,946 +0,0 @@ -from src.func.echaFind import search_dossier -from bs4 import BeautifulSoup -from markdownify import MarkdownConverter -import pandas as pd -import requests -import os -import re -import markdown_to_json -import json -import copy -import unicodedata -from datetime import datetime -import logging -import duckdb - -# Settings per il logging -logging.basicConfig( - format="{asctime} - {levelname} - {message}", - style="{", - datefmt="%Y-%m-%d %H:%M", - filename="echa.log", - encoding="utf-8", - filemode="a", - level=logging.INFO, -) - -try: - # Carico il full scraping in memoria se esiste - con = duckdb.connect() - os.chdir(".") - res = con.sql(""" - CREATE TABLE echa_full_scraping AS - SELECT * FROM read_csv_auto('src\data\echa_full_scraping.csv'); - """) - logging.info( - f"echa.echaProcess().main: Loaded echa scraped data into duckdb memory. First CAS in the df is: {con.sql('select CAS from echa_full_scraping limit 1').fetchone()[0]}" - ) - local_echa = True -except: - logging.error(f"echa.echaProcess().main: No local echa scraped data found") - - -# Metodo per trovare le informazioni relative sul sito echa -# Funziona sia con il nome della sostanza che con il CUS -def openEchaPage(link, local=False): - try: - if local: - page = open(link, encoding="utf8") - soup = BeautifulSoup(page, "html.parser") - else: - page = requests.get(link) - page.encoding = "utf-8" - soup = BeautifulSoup(page.text, "html.parser") - except: - logging.error( - f"echa.echaProcess.openEchaPage() error. 
could not open: '{link}'", - exc_info=True, - ) - return soup - - -# Metodo per trasformare la pagina dell'echa in un Markdown -def echaPage_to_md(sezione, scrapingType=None, local=False, substance=None): - # sezione : il soup della pagina estratta attraverso search_dossier - # scrapingType : 'RepeatedDose' o 'AcuteToxicity' - # local : se vuoi salvare il contenuto del markdown in locale. Utile per debuggare - # substance : il nome della sostanza. Per salvarla nel path corretto - - # Create shorthand method for conversion - def md(soup, **options): - return MarkdownConverter(**options).convert_soup(soup) - - output = md(sezione) - # Trasformo la section html in un markdown, che però va corretto. - - # Cambia un po' il modo in cui modifico il .md in base al tipo di pagina da scrappare - # aggiungo eccezioni man mano che testo nuove sostanze - if scrapingType == "RepeatedDose": - output = output.replace("### Oral route", "#### oral") - output = output.replace("### Dermal", "#### dermal") - output = output.replace("### Inhalation", "#### inhalation") - # Devo rimpiazzare >< con delle stringhe perchè sennò il jsonifier interpreta quei due simboli come dei codici e inserisce il testo nelle [] - output = re.sub(r">\s+", "greater than ", output) - # Replace '<' followed by whitespace with 'less than ' - output = re.sub(r"<\s+", "less than ", output) - output = re.sub(r">=\s*\n", "greater or equal than ", output) - output = re.sub(r"<=\s*\n", "less or equal than ", output) - - elif scrapingType == "AcuteToxicity": - # Devo rimpiazzare >< con delle stringhe perchè sennò il jsonifier interpreta quei due simboli come dei codici e inserisce il testo nelle [] - output = re.sub(r">\s+", "greater than ", output) - # Replace '<' followed by whitespace with 'less than ' - output = re.sub(r"<\s+", "less than ", output) - output = re.sub(r">=\s*\n", "greater or equal than", output) - output = re.sub(r"<=\s*\n", "less or equal than ", output) - - output = output.replace("–", "-") - - 
output = re.sub(r"\s+mg", " mg", output) - # sta parte serve per fixare le unità di misura che vanno a capo e sono separate dal valore - - if local and substance: - path = f"{scrapingType}/mds/{substance}.md" - os.makedirs(os.path.dirname(path), exist_ok=True) - with open(path, "w") as text_file: - text_file.write(output) - - return output - - - -# Questo è la parte 2 del processing del sito ECHA. Va trasformato il markdown in un JSON -def markdown_to_json_raw(output, scrapingType=None, local=False, substance=None): - # Output: Il markdown - # scrapingType : 'RepeatedDose' o 'AcuteToxicity' - # substance : il nome della sostanza. Per salvarla nel path corretto - jsonified = markdown_to_json.jsonify(output) - dictified = json.loads(jsonified) - - # Salvo il json iniziale così come esce da jsonify - if local and scrapingType and substance: - path = f"{scrapingType}/jsons/raws/{substance}_raw0.json" - os.makedirs(os.path.dirname(path), exist_ok=True) - - with open(path, "w") as text_file: - text_file.write(jsonified) - - # Ora splitto i contenuti dei dizionari innestati. 
- for key, value in dictified.items(): - if type(value) == dict: - for key2, value2 in value.items(): - parts = value2.split("\n\n") - dictified[key][key2] = { - parts[i]: parts[i + 1] - for i in range(0, len(parts) - 1, 2) - if parts[i + 1] != "[Empty]" - } - else: - parts = value.split("\n\n") - dictified[key] = { - parts[i]: parts[i + 1] - for i in range(0, len(parts) - 1, 2) - if parts[i + 1] != "[Empty]" - } - - jsonified = json.dumps(dictified) - - if local and scrapingType and substance: - path = f"{scrapingType}/jsons/raws/{substance}_raw1.json" - os.makedirs(os.path.dirname(path), exist_ok=True) - - with open(path, "w") as text_file: - text_file.write(jsonified) - - dictified = json.loads(jsonified) - - return jsonified - - -# Metodo creato da claude per risolvere i problemi di unicode characters -def normalize_unicode_characters(text): - """ - Normalize Unicode characters, with special handling for superscript - """ - if not isinstance(text, str): - return text - - # Specific replacements for common Unicode encoding issues - # e per altre eccezioni particolari - replacements = { - "\u00c2\u00b2": "²", # ² -> ² - "\u00c2\u00b3": "³", # ³ -> ³ - "\u00b2": "²", # Bare superscript 2 - "\u00b3": "³", # Bare superscript 3 - "\n": "", # ogni tanto ci sono degli \n brutti da togliere - "greater than": ">", - "less than": "<", - "greater or equal than": ">=", - "less or equal than": "<", - # Ste due entry le ho messe io. >< creano problemi quindi le rinonimo temporaneamente - } - - # Apply specific replacements first - for old, new in replacements.items(): - text = text.replace(old, new) - - # Normalize Unicode characters - text = unicodedata.normalize("NFKD", text) - - return text - - -# Un'altro metodo creato da Claude. -# Pare che il mio cervello sia troppo piccolo per riuscire a ciclare ricursivamente -# un dizionario innestato. Se magari avessimo fatto algoritmi e strutture dati... 
-def clean_json(data): - """ - Recursively clean JSON by removing empty/uninformative entries - and normalizing Unicode characters - """ - - def is_uninformative(value, context=None): - """ - Check if a dictionary entry is considered uninformative - - Args: - value: The value to check - context: Additional context about where the value is located - """ - # Specific exceptions - if context and context == "Key value for chemical safety assessment": - # Always keep all entries in this specific section - return False - - uninformative_values = ["hours/week", "", None] - - return value in uninformative_values or ( - isinstance(value, str) - and ( - value.strip() in uninformative_values - or value.lower() == "no information available" - ) - ) - - def clean_recursive(obj, context=None): - # If it's a dictionary, process its contents - if isinstance(obj, dict): - # Create a copy to modify - cleaned = {} - for key, value in obj.items(): - # Normalize key - normalized_key = normalize_unicode_characters(key) - - # Set context for nested dictionaries - new_context = context or normalized_key - - # Recursively clean nested structures - cleaned_value = clean_recursive(value, new_context) - - # Conditions for keeping the entry - keep_entry = ( - cleaned_value not in [None, {}, ""] - and not ( - isinstance(cleaned_value, dict) and len(cleaned_value) == 0 - ) - and not is_uninformative(cleaned_value, new_context) - ) - - # Add to cleaned dict if conditions are met - if keep_entry: - cleaned[normalized_key] = cleaned_value - - return cleaned if cleaned else None - - # If it's a list, clean each item - elif isinstance(obj, list): - cleaned_list = [clean_recursive(item, context) for item in obj] - cleaned_list = [item for item in cleaned_list if item not in [None, {}, ""]] - return cleaned_list if cleaned_list else None - - # For strings, normalize Unicode - elif isinstance(obj, str): - return normalize_unicode_characters(obj) - - # Return as-is for other types - return obj - - # 
Create a deep copy to avoid modifying original data - cleaned_data = clean_recursive(copy.deepcopy(data)) - # Sì figa questa è la parte che mi ha fatto sclerare - # Ciclare in dizionari innestati senza poter modificare la struttura - return cleaned_data - - -def json_to_dataframe(cleaned_json, scrapingType): - rows = [] - schema = { - "RepeatedDose": [ - "Substance", - "CAS", - "Toxicity Type", - "Route", - "Dose descriptor", - "Effect level", - "Species", - "Extraction_Timestamp", - "Endpoint conclusion", - ], - "AcuteToxicity": [ - "Substance", - "CAS", - "Route", - "Endpoint conclusion", - "Dose descriptor", - "Effect level", - "Extraction_Timestamp", - ], - } - if scrapingType == "RepeatedDose": - # Iterate through top-level sections (excluding 'Key value for chemical safety assessment') - for toxicity_type, routes in cleaned_json.items(): - if toxicity_type == "Key value for chemical safety assessment": - continue - - # Iterate through routes within each toxicity type - for route, details in routes.items(): - row = {"Toxicity Type": toxicity_type, "Route": route} - - # Add details to the row, excluding 'Link to relevant study record(s)' - row.update( - { - k: v - for k, v in details.items() - if k != "Link to relevant study record(s)" - } - ) - rows.append(row) - elif scrapingType == "AcuteToxicity": - for toxicity_type, routes in cleaned_json.items(): - if ( - toxicity_type == "Key value for chemical safety assessment" - or not routes - ): - continue - - row = { - "Route": toxicity_type.replace("Acute toxicity: via", "") - .replace("route", "") - .strip() - } - - # Add details directly from the routes dictionary - row.update( - { - k: v - for k, v in routes.items() - if k != "Link to relevant study record(s)" - } - ) - rows.append(row) - - # Create DataFrame - df = pd.DataFrame(rows) - - # Last moment fixes. 
Per forzare uno schema - fair_columns = list(set(schema["RepeatedDose"] + schema["AcuteToxicity"])) - df = df = df.loc[:, df.columns.intersection(fair_columns)] - return df - - -def save_dataframe(df, file_path, scrapingType, schema): - """ - Save DataFrame with strict column requirements. - - Args: - df (pd.DataFrame): DataFrame to potentially append - file_path (str): Path of CSV file - """ - # Mandatory columns for saved DataFrame - - saved_columns = schema[scrapingType] - - # Check if input DataFrame has at least Dose Descriptor and Effect Level - if not all(col in df.columns for col in ["Effect level"]): - return - - # If file exists, read it to get saved columns - if os.path.exists(file_path): - existing_df = pd.read_csv(file_path) - - # Reindex to match saved columns, filling missing with NaN - df = df.reindex(columns=saved_columns) - else: - # If file doesn't exist, create DataFrame with saved columns - df = df.reindex(columns=saved_columns) - - df = df[df["Effect level"].isna() == False] - # Ignoro le righe che non hanno valori per Effect Level - - # Append or save the DataFrame - df.to_csv( - file_path, - mode="a" if os.path.exists(file_path) else "w", - header=not os.path.exists(file_path), - index=False, - ) - - -def echaExtract( - substance: str, - scrapingType: str, - outputType="df", - key_infos=False, - local_search=False, - local_only = False -): - """ - Funzione principale per scrapare dal sito ECHA. Mette insieme tante funzioni diverse di ricerca, estrazione e pulizia. - Registra il logging delle operazioni. - - Args: - substance (str): CAS o nome della sostanza. Vanno bene entrambi ma il CAS funziona meglio. - scrapingType (str): 'AcuteToxicity' (LD50) o 'RepeatedDose' (NOAEL) - outputType (str): 'pd.DataFrame' o 'json' (sconsigliato) - key_infos (bool): Di base True. Specifica se cercare la sezione "Description of Key Information" nei dossiers. 
- Certe sostanze hanno i dati inseriti a cazzo e mettono le informazioni lì in forma discorsiva al posto che altrove. - - Output: - un dataframe o un json, - f"Non esistono lead dossiers attivi o inattivi per {substance}" - """ - - # se local_search = True tento una ricerca in locale. Altrimenti la provo online. - if local_search and local_echa: - result = echaExtract_local(substance, scrapingType, key_infos) - - if not result.empty: - logging.info( - f"echa.echaProcess.echaExtract(): Found local data for {scrapingType}, {substance}. Returning it." - ) - return result - elif result.empty: - logging.info( - f"echa.echaProcess.echaExtract(): Have not found local data for {scrapingType}, {substance}. Continuining." - ) - if local_only: - logging.info(f'echa.echaProcess.echaExtract(): No data found in local-only search for {substance}, {scrapingType}') - return f'No data found in local-only search for {substance}, {scrapingType}' - - try: - # con search_dossier trovo le informazioni relative al dossiers cercando sul sito echa la sostanza fornita. - links = search_dossier(substance) - if not links: - logging.info( - f'echaProcess.echaExtract(). no active or unactive lead dossiers for: "{substance}". Ending extraction.' - ) - return f"Non esistono lead dossiers attivi o inattivi per {substance}" - # Se non esistono LEAD dossiers (quelli con i riassunti tossicologici) attivi o inattivi - - # Se esistono, apro la pagina che mi interessa ('Acute Toxicity' o 'Repeated Dose') - - if not scrapingType in list(links.keys()): - logging.info( - f'echaProcess.echaExtract(). No page for "{scrapingType}", "{substance}"' - ) - return f'No data in "{scrapingType}", "{substance}". Page does not exist.' - - soup = openEchaPage(link=links[scrapingType]) - logging.info( - f"echaProcess.echaExtract(). 
soupped '{scrapingType}' echa page for '{substance}'" - ) - - # Piglio la sezione che mi serve - try: - sezione = soup.find( - "section", - class_="KeyValueForChemicalSafetyAssessment", - attrs={"data-cy": "das-block"}, - ) - except: - logging.error( - f'echaProcess.echaExtract(). could not extract the "section" for "{scrapingType}" for "{substance}"', - exc_info=True, - ) - - # Per ottenere il timestamp attuale - now = datetime.now() - - # UPDATE. Cerco le key infos - key_infos_faund = False - if key_infos: - try: - key_infos = soup.find( - "section", - class_="KeyInformation", - attrs={"data-cy": "das-block"}, - ) - if key_infos: - key_infos = key_infos.find( - "div", - class_="das-field_value das-field_value_html", - ) - key_infos = key_infos.text - key_infos = key_infos if key_infos.strip() != "[Empty]" else None - if key_infos: - key_infos_faund = True - logging.info( - f"echaProcess.echaExtract(). Extracted key_infos from '{scrapingType}' echa page for '{substance}': {key_infos}" - ) - key_infos_df = pd.DataFrame(index=[0]) - key_infos_df["key_information"] = key_infos - key_infos_df = df_wrapper( - df=key_infos_df, - rmlName=links["rmlName"], - rmlCas=links["rmlCas"], - timestamp=now.strftime("%Y-%m-%d"), - dossierType=links["dossierType"], - page=scrapingType, - linkPage=links[scrapingType], - key_infos=True, - ) - else: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() ERROR. could not extract key_infos for "{scrapingType}", "{substance}"' - ) - else: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() ERROR. could not extract key_infos for "{scrapingType}", "{substance}"' - ) - except: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() ERROR. 
could not extract key_infos for "{scrapingType}", "{substance}"', - exc_info=True, - ) - - try: - if not sezione: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() Empty section for the html > markdown conversion. No data for "{scrapingType}", "{substance}"' - ) - if not key_infos_faund: - # Se non ci sono dati ma ci sono le key informations ritorno quelle - return f'No data in "{scrapingType}", "{substance}"' - else: - return key_infos_df - - # Trasformo la sezione html in markdown - output = echaPage_to_md( - sezione, scrapingType=scrapingType, substance=substance - ) - logging.info( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() OK. created MD for "{scrapingType}", "{substance}"' - ) - - # Ci sono rari casi in cui proprio non esistono pagine per l'acute toxicity o la repeated dose. In quel caso output sarà vuoto e darà errore - # logging.info(output) - except: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() ERROR. could not MD for "{scrapingType}", "{substance}"', - exc_info=True, - ) - - try: - # Trasformo il markdown nel primo json raw - jsonified = markdown_to_json_raw( - output, scrapingType=scrapingType, substance=substance - ) - logging.info( - f'echaProcess.echaExtract() > echaProcess.markdown_to_json_raw() OK. created initial json for "{scrapingType}", "{substance}"' - ) - except: - logging.error( - f'echaProcess.echaExtract() > echaProcess.markdown_to_json_raw() ERROR. could not create initial json for "{scrapingType}", "{substance}"', - exc_info=True, - ) - - json_data = json.loads(jsonified) - - try: - # Secondo step per il processing del json: pulisco i dizionari piu' innestati - cleaned_data = clean_json(json_data) - logging.info( - f'echaProcess.echaExtract() > echaProcess.clean_json() OK. 
cleaned the json for "{scrapingType}", "{substance}"' - ) - # Se cleaned_data è vuoto vuol dire che non ci sono dati - if not cleaned_data: - logging.error( - f'echaProcess.echaExtract() > echaProcess.clean_json() Empty cleaned_json. No data for "{scrapingType}", "{substance}"' - ) - if not key_infos_faund: - # Se non ci sono dati ma ci sono le key informations ritorno quelle - return f'No data in "{scrapingType}", "{substance}"' - else: - return key_infos_df - except: - logging.error( - f'echaProcess.echaExtract() > echaProcess.clean_json() ERROR. cleaning the json for "{scrapingType}", "{substance}"' - ) - - # Se si vuole come output il dataframe creo un dataframe e ci aggiungo un timestamp - try: - df = json_to_dataframe(cleaned_data, scrapingType) - df = df_wrapper( - df=df, - rmlName=links["rmlName"], - rmlCas=links["rmlCas"], - timestamp=now.strftime("%Y-%m-%d"), - dossierType=links["dossierType"], - page=scrapingType, - linkPage=links[scrapingType], - ) - - if outputType == "df": - logging.info( - f'echaProcess.echaExtract(). succesfully extracted "{scrapingType}", "{substance}". Returning df' - ) - - # Se l'utente vuole le key infos e le key_infos sono state trovate unisco i due df - return df if not key_infos_faund else pd.concat([key_infos_df, df]) - - elif outputType == "json": - if key_infos_faund: - df = pd.concat([key_infos_df, df]) - jayson = df.to_json(orient="records", force_ascii=False) - logging.info( - f'echaProcess.echaExtract(). succesfully extracted "{scrapingType}", "{substance}". Returning json' - ) - return jayson - except KeyError: - # Per gestire le pagine di merda che hanno solo "no information available" - - if key_infos_faund: - return key_infos_df - - json_output = list(cleaned_data[list(cleaned_data.keys())[0]].values()) - if json_output == ["no information available" for elem in json_output]: - logging.info( - f"echaProcess.echaExtract(). 
No data found for {scrapingType} for {substance}" - ) - return f'No data in "{scrapingType}", "{substance}"' - else: - logging.error( - f"echaProcess.json_to_dataframe(). Could not create dataframe" - ) - cleaned_data["error"] = ( - "Non sono riuscito a creare il dataframe, probabilmente non ci sono abbastanza informazioni. Ritorno il JSON" - ) - return cleaned_data - - except Exception: - logging.error( - f"echaProcess.echaExtract() ERROR. Something went wrong, not quite sure what.", - exc_info=True, - ) - - -def df_wrapper( - df, rmlName, rmlCas, timestamp, dossierType, page, linkPage, key_infos=False -): - # Un semplice metodo per aggiungere tutta la roba che ci serve al dataframe. - # Per non intasare echaExtract che già di suo è un figa di bordello - df.insert(0, "Substance", rmlName) - df.insert(1, "CAS", rmlCas) - df["Extraction_Timestamp"] = timestamp - df = df.replace("\n", "", regex=True) - if not key_infos: - df = df[df["Effect level"].isnull() == False] - - # Aggiungo il link del dossier e lo status - df["dossierType"] = dossierType - df["page"] = page - df["linkPage"] = linkPage - return df - -def echaExtract_specific( - CAS: str, - scrapingType="RepeatedDose", - doseDescriptor="NOAEL", - route="inhalation", - local_search=False, - local_only=False -): - """ - Dato un CAS cerca di trovare il dose descriptor (di base NOAEL) per la route specificata (di base 'inhalation'). - - Args: - CAS (str): il cas o in alternativa la sostanza - route (str): 'inhalation', 'oral', 'dermal'. Di base 'inhalation' - scrapingType (str): la pagina su cui cercarlo - doseDescriptor (str): il tipo di valore da ricercare (NOAEL, DNEL, LD50, LC50) - """ - - # Tento di estrarre - result = echaExtract( - substance=CAS, - scrapingType=scrapingType, - outputType="df", - local_search=local_search, - local_only=local_only - ) - - # Il risultato è un dataframe? 
- if type(result) == pd.DataFrame: - # Se sì, lo filtro per ciò che mi interessa - filtered_df = result[ - (result["Route"] == route) & (result["Dose descriptor"] == doseDescriptor) - ] - # Se non è vuoto lo ritorno - if not filtered_df.empty: - return filtered_df - else: - return f'Non ho trovato {doseDescriptor} in {scrapingType} con route "{route}" per {CAS}' - - elif type(result) == dict and result["error"]: - # Questo significa che gli è arrivato qualche json con un errore - return f'Non ho trovato {doseDescriptor} in {scrapingType} con route "{route}" per {CAS}' - - # Questo significa che ha ricevuto un "Non esistono" come risultato. Non esistono lead dossiers attivi o inattivi per la sostanza ricercata - elif result.startswith("Non esistono"): - return result - - -def echa_noael_ld50(CAS: str, route="inhalation", outputType="df", local_search=False, local_only=False): - """ - Dato un CAS cerca di trovare il NOAEL per la route specificata (di base 'inhalation'). - Se non esiste la pagina RepeatedDose con il NOAEL fa ritornare l'LD50 per quella route. - - Args: - CAS (str): il cas o in alternativa la sostanza - route (str): 'inhalation', 'oral', 'dermal'. Di base 'inhalation' - outputType (str) = 'df', 'json'. 
Il tipo di output - - """ - if route not in ["inhalation", "oral", "dermal"] and outputType not in [ - "df", - "json", - ]: - return "invalid input" - # Di base cerco di scrapare la pagina "Repeated Dose" - first_attempt = echaExtract_specific( - CAS=CAS, - scrapingType="RepeatedDose", - doseDescriptor="NOAEL", - route=route, - local_search=local_search, - local_only=local_only - ) - - if isinstance(first_attempt, pd.DataFrame): - return first_attempt - elif isinstance(first_attempt, str) and first_attempt.startswith("Non ho trovato"): - second_attempt = echaExtract_specific( - CAS=CAS, - scrapingType="AcuteToxicity", - doseDescriptor="LD50", - route=route, - local_search=True, - local_only=local_only - ) - if isinstance(second_attempt, pd.DataFrame): - return second_attempt - elif isinstance(second_attempt, str) and second_attempt.startswith( - "Non ho trovato" - ): - return second_attempt.replace("LD50", "NOAEL ed LD50") - elif first_attempt.startswith("Non esistono"): - return first_attempt - - -def echa_noael_ld50_multi( - casList: list, route="inhalation", messages=False, local_search=False, local_only=False -): - """ - Metodo abbastanza semplice. Data una lista di cas esegue echa_noael_ld50. Quindi cerca i NOAEL per la route desiderata o gli LD50 se non trova i NOAEL. - L'output è un df per le sostanze che trova e una lista di messaggi per quelle che non trova. - - Args: - casList (list): la lista di CAS - route (str): 'inhalation', 'oral', 'dermal'. Di base 'inhalation' - messages (boolean) = True o False. Con True fa ritornare una lista. Il primo elemento sarà il dataframe, il secondo la lista di messaggi per le sostanze non trovate. - Di base è False e fa ritornare solo il dataframe. 
- """ - messages_list = [] - df = pd.DataFrame() - for CAS in casList: - output = echa_noael_ld50( - CAS=CAS, route=route, outputType="df", local_search=local_search, local_only=local_only - ) - if isinstance(output, str): - messages_list.append(output) - elif isinstance(output, pd.DataFrame): - df = pd.concat([df, output], ignore_index=True) - df.dropna(axis=1, how="all", inplace=True) - if messages and df.empty: - messages_list.append( - f'Non sono riuscito a trovare nessun NOAEL o LD50 per i cas per la route "{route}"' - ) - return [None, messages_list] - elif messages and not df.empty: - return [df, messages_list] - elif not df.empty and not messages: - return df - elif df.empty and not messages: - return f'Non sono riuscito a trovare nessun NOAEL o LD50 per i cas per la route "{route}"' - - -def echaExtract_multi( - casList: list, - scrapingType="all", - local=False, - local_path=None, - log_path=None, - debug_print=False, - error=False, - error_path=None, - key_infos=False, - local_search=False, - local_only=False, - filter = None -): - """ - Data una lista di CAS cerca di estrarre tutte le pagine con le Repeated Dose, tutte le pagine con l'AcuteToxicity, o entrambe - - Args: - casList (list): la lista di CAS - scrapingType (str): 'RepeatedDose', 'AcuteToxicity', 'all' - local (boolean): Se impostato su True questo parametro salva sul disco in maniera progressiva, appendendo ogni result man mano che li trova. - è necessario per lo scraping su larga scala - log_path (str): il path per il log da fillare durante lo scraping di massa - debug_print (bool): per avere il printing durante lo scraping. per verificare l'avanzamento - error (bool): Per far ritornare la lista degli errori una volta scrapato - - Output: - pd.Dataframe - """ - cas_len = len(casList) - i = 0 - - df = pd.DataFrame() - if scrapingType == "all": - scrapingTypeList = ["RepeatedDose", "AcuteToxicity"] - else: - scrapingTypeList = [scrapingType] - - logging.info( - f"echa.echaExtract_multi(). 
Commencing mass extraction of {scrapingTypeList} for {casList}" - ) - - errors = [] - - for cas in casList: - for scrapingType in scrapingTypeList: - extraction = echaExtract( - substance=cas, - scrapingType=scrapingType, - outputType="df", - key_infos=key_infos, - local_search=local_search, - local_only=local_only - ) - if isinstance(extraction, pd.DataFrame) and not extraction.empty: - status = "successful_scrape" - logging.info( - f"echa.echaExtract_multi(). Succesfully scraped {scrapingType} for {cas}" - ) - - df = pd.concat([df, extraction], ignore_index=True) - if local and local_path: - df.to_csv(local_path, index=False) - - elif ( - (isinstance(extraction, pd.DataFrame) and extraction.empty) - or (extraction is None) - or (isinstance(extraction, str) and extraction.startswith("No data")) - ): - status = "no_data_found" - logging.info( - f"echa.echaExtract_multi(). Found no data for {scrapingType} for {cas}" - ) - elif isinstance(extraction, dict): - if extraction["error"]: - status = "df_creation_error" - errors.append(extraction) - logging.info( - f"echa.echaExtract_multi(). Df creation error for {scrapingType} for {cas}" - ) - elif isinstance(extraction, str) and extraction.startswith("Non esistono"): - status = "no_lead_dossiers" - logging.info( - f"echa.echaExtract_multi(). Found no lead dossiers for {cas}" - ) - else: - status = "unknown_error" - logging.error( - f"echa.echaExtract_multi(). 
Unknown error for {scrapingType} for {cas}" - ) - - if log_path: - fill_log(cas, status, log_path, scrapingType) - if debug_print: - print(f"{i}: {cas}, {scrapingType}") - i += 1 - - if error and errors and error_path: - with open(error_path, "w") as json_file: - json.dump(errors, json_file, indent=4) - - # Questa è la mossa che mi permette di eliminare 4 metodi - if filter: - df = filter_dataframe_by_dict(df, filter) - return df - - -def fill_log(cas: str, status: str, log_path: str, scrapingType: str): - """ - Funzione usata durante lo scraping di massa per fillare un log mentre estraggo le sostanze - """ - - df = pd.read_csv(log_path) - df.loc[df["casNo"] == cas, f"scraping_{scrapingType}"] = status - df.loc[df["casNo"] == cas, "timestamp"] = datetime.now().strftime("%Y-%m-%d") - - df.to_csv(log_path, index=False) - -def echaExtract_local(substance:str, scrapingType:str, key_infos=False): - if not key_infos: - query = f""" - SELECT * - FROM echa_full_scraping - WHERE CAS = '{substance}' AND page = '{scrapingType}' AND key_information IS NULL; - """ - elif key_infos: - query = f""" - SELECT * - FROM echa_full_scraping - WHERE CAS = '{substance}' AND page = '{scrapingType}'; - - """ - result = con.sql(query).df() - return result - -def filter_dataframe_by_dict(df, filter_dict): - """ - Filters a Pandas DataFrame based on a dictionary. - - Args: - df (pd.DataFrame): The input DataFrame. - filter_dict (dict): A dictionary where keys are column names and - values are lists of allowed values for that column. - - Returns: - pd.DataFrame: A new DataFrame containing only the rows that match - the filter criteria. 
- """ - - filter_condition = pd.Series(True, index=df.index) # Initialize with all True to start filtering - - for column_name, allowed_values in filter_dict.items(): - if column_name in df.columns: # Check if the column exists in the DataFrame - column_filter = df[column_name].isin(allowed_values) # Create a boolean Series for the current column - filter_condition = filter_condition & column_filter # Combine with existing condition using 'and' - else: - print(f"Warning: Column '{column_name}' not found in the DataFrame. Filter for this column will be ignored.") - - filtered_df = df[filter_condition] # Apply the combined filter condition - return filtered_df diff --git a/old/_old/find.py b/old/_old/find.py deleted file mode 100644 index 2c80850..0000000 --- a/old/_old/find.py +++ /dev/null @@ -1,497 +0,0 @@ -import requests -import urllib.parse -import json -import logging -import re -from datetime import datetime -from bs4 import BeautifulSoup -from typing import Dict, Union, Optional, Any - -# Settings per il logging -logging.basicConfig( - format="{asctime} - {levelname} - {message}", - style="{", - datefmt="%Y-%m-%d %H:%M", - filename=".log", - encoding="utf-8", - filemode="a", - level=logging.INFO, -) - - -# Constants for API endpoints -QUACKO_BASE_URL = "https://chem.echa.europa.eu" -QUACKO_SUBSTANCE_API = f"{QUACKO_BASE_URL}/api-substance/v1/substance" -QUACKO_DOSSIER_API = f"{QUACKO_BASE_URL}/api-dossier-list/v1/dossier" -QUACKO_HTML_PAGES = f"{QUACKO_BASE_URL}/html-pages" - -# Default sections to look for in the dossier -DEFAULT_SECTIONS = { - "id_7_Toxicologicalinformation": "ToxSummary", - "id_72_AcuteToxicity": "AcuteToxicity", - "id_75_Repeateddosetoxicity": "RepeatedDose", - "id_6_Ecotoxicologicalinformation": "EcotoxSummary", - "id_76_Genetictoxicity" : 'GeneticToxicity', - "id_42_Meltingpointfreezingpoint" : "MeltingFreezingPoint", - "id_43_Boilingpoint" : "BoilingPoint", - "id_48_Watersolubility" : "WaterSolubility", - "id_410_Surfacetension" : 
"SurfaceTension", - "id_420_pH" : "pH", - "Test" : "Test2" -} - -def search_dossier( - substance: str, - input_type: str = 'rmlCas', - sections: Dict[str, str] = None, - local_index_path: str = None -) -> Union[Dict[str, Any], str, bool]: - """ - Search for a chemical substance in the QUACKO database and retrieve its dossier information. - - Args: - substance (str): The identifier of the substance to search for (e.g. CAS number, name) - input_type (str): The type of identifier provided. Options: 'rmlCas', 'rmlName', 'rmlEc' - sections (Dict[str, str], optional): Dictionary mapping section IDs to result keys. - If None, default sections will be used. - local_index_path (str, optional): Path to a local index.html file to parse instead of - downloading from QUACKO. If provided, the function will - skip all API calls and only extract sections from the local file. - - Returns: - Union[Dict[str, Any], str, bool]: Dictionary with substance information and dossier links on success, - error message string if substance found but with issues, - False if substance not found or other critical error - """ - # Use default sections if none provided - if sections is None: - sections = DEFAULT_SECTIONS - - try: - results = {} - - # If a local file is provided, extract sections from it directly - if local_index_path: - logging.info(f"QUACKO.search() - Using local index file: {local_index_path}") - - # We still need some minimal info for constructing the URLs - if '/' not in local_index_path: - asset_id = "local" - rml_id = "local" - else: - # Try to extract information from the path if available - path_parts = local_index_path.split('/') - # If path follows expected structure: .../html-pages/ASSET_ID/index.html - if 'html-pages' in path_parts and 'index.html' in path_parts[-1]: - asset_id = path_parts[path_parts.index('html-pages') + 1] - rml_id = "extracted" # Just a placeholder - else: - asset_id = "local" - rml_id = "local" - - # Add these to results for consistency - 
results["assetExternalId"] = asset_id - results["rmlId"] = rml_id - - # Extract sections from the local file - section_links = get_section_links_from_file(local_index_path, asset_id, rml_id, sections) - if section_links: - results.update(section_links) - - return results - - # Normal flow with API calls - substance_data = get_substance_by_identifier(substance) - if not substance_data: - return False - - # Verify that the found substance matches the input identifier - if substance_data.get(input_type) != substance: - error_msg = (f"Search error: results[{input_type}] (\"{substance_data.get(input_type)}\") " - f"is not equal to \"{substance}\". Maybe you specified the wrong input_type. " - f"Check the results here: {substance_data.get('search_response')}") - logging.error(f"QUACKO.search(): {error_msg}") - return error_msg - - # Step 2: Find dossiers for the substance - rml_id = substance_data["rmlId"] - dossier_data = get_dossier_by_rml_id(rml_id, substance) - if not dossier_data: - return False - - # Merge substance and dossier data - results = {**substance_data, **dossier_data} - - # Step 3: Extract detailed information from dossier index page - asset_external_id = dossier_data["assetExternalId"] - section_links = get_section_links_from_index(asset_external_id, rml_id, sections) - if section_links: - results.update(section_links) - - logging.info(f"QUACKO.search() OK. output: {json.dumps(results)}") - return results - - except Exception as e: - logging.error(f"QUACKO.search(): Unexpected error in search_dossier for '{substance}': {str(e)}") - return False - - -def get_substance_by_identifier(substance: str) -> Optional[Dict[str, str]]: - """ - Search the QUACKO database for a substance using the provided identifier. - - Args: - substance (str): The substance identifier to search for (CAS number, name, etc.) 
- - Returns: - Optional[Dict[str, str]]: Dictionary with substance information or None if not found - """ - encoded_substance = urllib.parse.quote(substance) - search_url = f"{QUACKO_SUBSTANCE_API}?pageIndex=1&pageSize=100&searchText={encoded_substance}" - - logging.info(f'QUACKO.search(). searching "{substance}"') - - try: - response = requests.get(search_url) - response.raise_for_status() # Raise exception for HTTP errors - data = response.json() - - if not data.get("items") or len(data["items"]) == 0: - logging.info(f"QUACKO.search() could not find substance for '{substance}'") - return None - - # Extract substance information - substance_index = data["items"][0]["substanceIndex"] - result = { - 'search_response': search_url, - 'rmlId': substance_index.get("rmlId", ""), - 'rmlName': substance_index.get("rmlName", ""), - 'rmlCas': substance_index.get("rmlCas", ""), - 'rmlEc': substance_index.get("rmlEc", "") - } - - logging.info( - f"QUACKO.search() found substance on QUACKO. " - f"rmlId: '{result['rmlId']}', rmlName: '{result['rmlName']}', rmlCas: '{result['rmlCas']}'" - ) - return result - - except requests.RequestException as e: - logging.error(f"QUACKO.search() - Request error while searching for substance '{substance}': {str(e)}") - return None - except (KeyError, IndexError) as e: - logging.error(f"QUACKO.search() - Data parsing error for substance '{substance}': {str(e)}") - return None - - -def get_dossier_by_rml_id(rml_id: str, substance_name: str) -> Optional[Dict[str, Any]]: - """ - Find dossiers for a substance using its RML ID. 
- - Args: - rml_id (str): The RML ID of the substance - substance_name (str): The name of the substance (for logging) - - Returns: - Optional[Dict[str, Any]]: Dictionary with dossier information or None if not found - """ - # First try active dossiers - dossier_results = _query_dossier_api(rml_id, "Active") - - # If no active dossiers found, try inactive ones - if not dossier_results: - logging.info( - f"QUACKO.search() - could not find active dossier for '{substance_name}'. " - "Proceeding to search in the unactive ones." - ) - dossier_results = _query_dossier_api(rml_id, "Inactive") - - if not dossier_results: - logging.info(f"QUACKO.search() - could not find unactive dossiers for '{substance_name}'") - return None - else: - logging.info(f"QUACKO.search() - found unactive dossiers for '{substance_name}'") - dossier_results["dossierType"] = "Inactive" - else: - logging.info(f"QUACKO.search() - found active dossiers for '{substance_name}'") - dossier_results["dossierType"] = "Active" - - return dossier_results - - -def _query_dossier_api(rml_id: str, status: str) -> Optional[Dict[str, Any]]: - """ - Helper function to query the QUACKO dossier API for a specific substance and status. 
- - Args: - rml_id (str): The RML ID of the substance - status (str): The status of dossiers to search for ('Active' or 'Inactive') - - Returns: - Optional[Dict[str, Any]]: Dictionary with dossier information or None if not found - """ - url = f"{QUACKO_DOSSIER_API}?pageIndex=1&pageSize=100&rmlId={rml_id}®istrationStatuses={status}" - - try: - response = requests.get(url) - response.raise_for_status() - data = response.json() - - if not data.get("items") or len(data["items"]) == 0: - return None - - result = { - "assetExternalId": data["items"][0]["assetExternalId"], - "rootKey": data["items"][0]["rootKey"], - } - - # Extract last update date if available - try: - last_update = data["items"][0]["lastUpdatedDate"] - datetime_object = datetime.fromisoformat(last_update.replace('Z', '+00:00')) - result['lastUpdateDate'] = datetime_object.date().isoformat() - except (KeyError, ValueError) as e: - logging.error(f"QUACKO.search() - Error extracting lastUpdateDate: {str(e)}") - - # Add index URLs - result["index"] = f"{QUACKO_HTML_PAGES}/{result['assetExternalId']}/index.html" - result["index_js"] = f"{QUACKO_BASE_URL}/{rml_id}/dossier-view/{result['assetExternalId']}" - - return result - - except requests.RequestException as e: - logging.error(f"QUACKO.search() - Request error while getting dossiers for RML ID '{rml_id}': {str(e)}") - return None - except (KeyError, IndexError) as e: - logging.error(f"QUACKO.search() - Data parsing error for RML ID '{rml_id}': {str(e)}") - return None - - -def get_section_links_from_index( - asset_id: str, - rml_id: str, - sections: Dict[str, str] -) -> Dict[str, str]: - """ - Extract links to specified sections from the dossier index page by downloading it. 
- - Args: - asset_id (str): The asset external ID of the dossier - rml_id (str): The RML ID of the substance - sections (Dict[str, str]): Dictionary mapping section IDs to result keys - - Returns: - Dict[str, str]: Dictionary with links to the requested sections - """ - index_url = f"{QUACKO_HTML_PAGES}/{asset_id}/index.html" - - try: - response = requests.get(index_url) - response.raise_for_status() - - # Parse content using the shared method - return parse_sections_from_html(response.text, asset_id, rml_id, sections) - - except requests.RequestException as e: - logging.error(f"QUACKO.search() - Request error while extracting section links: {str(e)}") - return {} - except Exception as e: - logging.error(f"QUACKO.search() - Error extracting section links: {str(e)}") - return {} - - -def get_section_links_from_file( - file_path: str, - asset_id: str, - rml_id: str, - sections: Dict[str, str] -) -> Dict[str, str]: - """ - Extract links to specified sections from a local index.html file. - - Args: - file_path (str): Path to the local index.html file - asset_id (str): The asset external ID to use for constructing URLs - rml_id (str): The RML ID to use for constructing URLs - sections (Dict[str, str]): Dictionary mapping section IDs to result keys - - Returns: - Dict[str, str]: Dictionary with links to the requested sections - """ - try: - if not os.path.exists(file_path): - logging.error(f"QUACKO.search() - Local file not found: {file_path}") - return {} - - with open(file_path, 'r', encoding='utf-8') as file: - html_content = file.read() - - # Parse content using the shared method - return parse_sections_from_html(html_content, asset_id, rml_id, sections) - - except FileNotFoundError: - logging.error(f"QUACKO.search() - File not found: {file_path}") - return {} - except Exception as e: - logging.error(f"QUACKO.search() - Error parsing local file {file_path}: {str(e)}") - return {} - - -def parse_sections_from_html( - html_content: str, - asset_id: str, - rml_id: str, 
- sections: Dict[str, str] -) -> Dict[str, str]: - """ - Parse HTML content to extract links to specified sections. - - Args: - html_content (str): HTML content to parse - asset_id (str): The asset external ID to use for constructing URLs - rml_id (str): The RML ID to use for constructing URLs - sections (Dict[str, str]): Dictionary mapping section IDs to result keys - - Returns: - Dict[str, str]: Dictionary with links to the requested sections - """ - result = {} - - try: - soup = BeautifulSoup(html_content, "html.parser") - - # Extract each requested section - for section_id, section_name in sections.items(): - section_links = extract_section_links(soup, section_id, asset_id, rml_id, section_name) - if section_links: - result.update(section_links) - logging.info(f"QUACKO.search() - Found section '{section_name}' in document") - else: - logging.info(f"QUACKO.search() - Section '{section_name}' not found in document") - - return result - - except Exception as e: - logging.error(f"QUACKO.search() - Error parsing HTML content: {str(e)}") - return {} - - - -# -------------------------------------------------------------------------- -# Function to Extract Section Links with Validation -# -------------------------------------------------------------------------- -# This function extracts the document link associated with a specific section ID -# from the QUACKO index.html page structure. -# -# Problem Solved: -# Previous attempts faced issues where searching for a link within a parent -# section's div (e.g., "7 Toxicological Information" with id="id_7_...") -# would incorrectly grab the link belonging to the *first child* section -# (e.g., "7.2 Acute Toxicity" with id="id_72_..."). This happened because -# the simple `find("a", href=True)` doesn't distinguish ownership when nested. -# -# Solution Logic: -# 1. Find Target Div: Locate the `div` element using the specific `section_id` provided. -# This div typically contains the section's content or nested subsections. 
-# 2. Find First Link: Find the very first `` tag that has an `href` attribute -# somewhere *inside* the `target_div`. -# 3. Find Link's Owning Section Div: Starting from the `first_link_tag`, traverse -# up the HTML tree using `find_parent()` to find the nearest ancestor `div` -# whose `id` attribute starts with "id_" (the pattern for section containers). -# 4. Validate Ownership: Compare the `id` of the `link_ancestor_section_div` found -# in step 3 with the original `section_id` passed into the function. -# 5. Decision: -# - If the IDs MATCH: It confirms that the `first_link_tag` truly belongs to the -# `section_id` we are querying. The function proceeds to extract and format -# this link. -# - If the IDs DO NOT MATCH: It indicates that the first link found actually -# belongs to a *nested* subsection div. Therefore, the original `section_id` -# (the parent/container) does not have its own direct link, and the function -# correctly returns an empty dictionary for this `section_id`. -# -# This validation step ensures that we only return links that are directly -# associated with the queried section ID, preventing the inheritance bug. -# -------------------------------------------------------------------------- -def extract_section_links( - soup: BeautifulSoup, - section_id: str, - asset_id: str, - rml_id: str, - section_name: str -) -> Dict[str, str]: - """ - Extracts a link for a specific section ID by finding the first link - within its div and verifying that the link belongs directly to that - section, not a nested subsection. - - Args: - soup (BeautifulSoup): The BeautifulSoup object of the index page. - section_id (str): The HTML ID of the section div. - asset_id (str): The asset external ID of the dossier. - rml_id (str): The RML ID of the substance. - section_name (str): The name to use for the section in the result. - - Returns: - Dict[str, str]: Dictionary with link if found and validated, - otherwise empty. - """ - result = {} - - # 1. 
Find the target div for the section ID - target_div = soup.find("div", id=section_id) - if not target_div: - logging.info(f"QUACKO.search() - extract_section_links(): No div found for id='{section_id}'") - return result - - # 2. Find the first tag with an href within this target div - first_link_tag = target_div.find("a", href=True) - if not first_link_tag: - logging.info(f"QUACKO.search() - extract_section_links(): No 'a' tag with href found within div id='{section_id}'") - return result # No links at all within this section - - # 3. Validate: Find the closest ancestor div with an ID starting with "id_" - # This tells us which section container the link *actually* resides in. - # We use a lambda function for the id check. - # Need to handle potential None if the structure is unexpected. - link_ancestor_section_div: Optional[Tag] = first_link_tag.find_parent( - "div", id=lambda x: x and x.startswith("id_") - ) - - # 4. Compare IDs - if link_ancestor_section_div and link_ancestor_section_div.get('id') == section_id: - # The first link found belongs directly to the section we are looking for. - logging.debug(f"QUACKO.search() - extract_section_links(): Valid link found for id='{section_id}'.") - a_tag_to_use = first_link_tag # Use the link we found - else: - # The first link found belongs to a *different* (nested) section - # or the structure is broken (no ancestor div with id found). - # Therefore, the section_id we were originally checking has no direct link. - ancestor_id = link_ancestor_section_div.get('id') if link_ancestor_section_div else "None" - logging.info(f"QUACKO.search() - extract_section_links(): First link within id='{section_id}' belongs to ancestor id='{ancestor_id}'. No direct link for '{section_id}'.") - return result # Return empty dict - - # 5. 
Proceed with link extraction using the validated a_tag_to_use - try: - document_id = a_tag_to_use.get('href') # Use .get() for safety - if not document_id: - logging.error(f"QUACKO.search() - extract_section_links(): Found 'a' tag for '{section_name}' has no href attribute.") - return {} - - # Clean up the document ID - if document_id.startswith('./documents/'): - document_id = document_id.replace('./documents/', '') - if document_id.endswith('.html'): - document_id = document_id.replace('.html', '') - - # Construct the full URLs unless in local-only mode - if asset_id == "local" and rml_id == "local": - result[section_name] = f"Local section found: {document_id}" - else: - result[section_name] = f"{QUACKO_HTML_PAGES}/{asset_id}/documents/{document_id}.html" - result[f"{section_name}_js"] = f"{QUACKO_BASE_URL}/{rml_id}/dossier-view/{asset_id}/{document_id}" - - return result - - except Exception as e: # Catch potential errors during processing - logging.error(f"QUACKO.search() - extract_section_links(): Error processing the validated link tag for '{section_name}': {str(e)}") - return {} \ No newline at end of file diff --git a/old/_old/mongo_functions.py b/old/_old/mongo_functions.py deleted file mode 100644 index f5a7d6e..0000000 --- a/old/_old/mongo_functions.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import Optional -from pymongo.mongo_client import MongoClient -from pymongo.server_api import ServerApi -from pymongo.database import Database - - -#region Funzioni generali connesse a MongoDB - -# Funzione di connessione al database - -def connect(user : str,password : str, database : str = "INCI") -> Database: - - uri = f"mongodb+srv://{user}:{password}@ufs13.dsmvdrx.mongodb.net/?retryWrites=true&w=majority&appName=UFS13" - client = MongoClient(uri, server_api=ServerApi('1')) - db = client['toxinfo'] - return db - -#endregion - -#region Funzioni di ricerca all'interno del mio DB - -# Funzione di ricerca degli elementi estratti dal cosing -def value_search(db : 
Database,value:str,mode : Optional[str] = None) -> Optional[dict]: - if mode: - json = db.toxinfo.find_one({mode:value},{"_id":0}) - if json: - return json - return None - else: - modes = ['commonName','inciName','casNo','ecNo','chemicalName','phEurName'] - for el in modes: - json = db.toxinfo.find_one({el:value},{"_id":0}) - if json: - return json - return None - -#endregion diff --git a/old/_old/pubchem.py b/old/_old/pubchem.py deleted file mode 100644 index bbd09f7..0000000 --- a/old/_old/pubchem.py +++ /dev/null @@ -1,149 +0,0 @@ - -import os -from contextlib import contextmanager -import pubchempy as pcp -from pubchemprops.pubchemprops import get_second_layer_props -import logging - -logging.basicConfig( - format="{asctime} - {levelname} - {message}", - style="{", - datefmt="%Y-%m-%d %H:%M", - filename="echa.log", - encoding="utf-8", - filemode="a", - level=logging.INFO, -) - -@contextmanager -def temporary_certificate(cert_path): - # Sto robo serve perchè per usare l'API di PubChem serve cambiare temporaneamente il certificato con il quale - # si fanno le richieste - - """ - Context manager to temporarily change the certificate used for requests. 
- - Args: - cert_path (str): Path to the certificate file to use temporarily - - Example: - # Regular request uses default certificates - requests.get('https://api.example.com') - - # Use custom certificate only within this block - with temporary_certificate('custom-cert.pem'): - requests.get('https://api.requiring.custom.cert.com') - - # Back to default certificates - requests.get('https://api.example.com') - """ - # Store original environment variables - original_ca_bundle = os.environ.get('REQUESTS_CA_BUNDLE') - original_ssl_cert = os.environ.get('SSL_CERT_FILE') - - try: - # Set new certificate - os.environ['REQUESTS_CA_BUNDLE'] = cert_path - os.environ['SSL_CERT_FILE'] = cert_path - yield - finally: - # Restore original environment variables - if original_ca_bundle is not None: - os.environ['REQUESTS_CA_BUNDLE'] = original_ca_bundle - else: - os.environ.pop('REQUESTS_CA_BUNDLE', None) - - if original_ssl_cert is not None: - os.environ['SSL_CERT_FILE'] = original_ssl_cert - else: - os.environ.pop('SSL_CERT_FILE', None) - -def clean_property_data(api_response): - """ - Simplifies the API response data by flattening nested structures. 
- - Args: - api_response (dict): Raw API response containing property data - - Returns: - dict: Cleaned data with simplified structure - """ - cleaned_data = {} - - for property_name, measurements in api_response.items(): - cleaned_measurements = [] - - for measurement in measurements: - cleaned_measurement = { - 'ReferenceNumber': measurement.get('ReferenceNumber'), - 'Description': measurement.get('Description', ''), - } - - # Handle Reference field - if 'Reference' in measurement: - # Check if Reference is a list or string - ref = measurement['Reference'] - cleaned_measurement['Reference'] = ref[0] if isinstance(ref, list) else ref - - # Handle Value field - value = measurement.get('Value', {}) - if isinstance(value, dict) and 'StringWithMarkup' in value: - cleaned_measurement['Value'] = value['StringWithMarkup'][0]['String'] - else: - cleaned_measurement['Value'] = str(value) - - # Remove empty values - cleaned_measurement = {k: v for k, v in cleaned_measurement.items() if v} - - cleaned_measurements.append(cleaned_measurement) - - cleaned_data[property_name] = cleaned_measurements - - return cleaned_data - -def pubchem_dap(cas): - ''' - Data un CAS in input ricerca le informazioni per la scheda di sicurezza su PubChem. - Per estrarre le proprietà di 1o (sinonimi, cid, logP, MolecularWeight, ExactMass, TPSA) livello uso Pubchempy. - Per quelle di 2o livello uso pubchemprops (Melting point) - - args: - cas : string - - ''' - with temporary_certificate('src/data/ncbi-nlm-nih-gov-catena.pem'): - try: - # Ricerca iniziale - out = pcp.get_synonyms(cas, 'name') - if out: - out = out[0] - output = {'CID' : out['CID'], - 'CAS' : cas, - 'first_pubchem_name' : out['Synonym'][0], - 'pubchem_link' : f"https://pubchem.ncbi.nlm.nih.gov/compound/{out['CID']}"} - else: - return f'No results on PubChem for {cas}' - - except Exception as E: - logging.error(f'various_utils.pubchem.pubchem_dap(). 
Some error during pubchem search for {cas}', exc_info=True) - - try: - # Ricerca delle proprietà - properties = pcp.get_properties(['xlogp', 'molecular_weight', 'tpsa', 'exact_mass'], identifier = out['CID'], namespace='cid', searchtype=None, as_dataframe=False) - if properties: - output = {**output, **properties[0]} - else: - return output - except Exception as E: - logging.error(f'various_utils.pubchem.pubchem_dap(). Some error during pubchem first level properties extraction for {cas}', exc_info=True) - - try: - # Ricerca del Melting Point - second_layer_props = get_second_layer_props(output['first_pubchem_name'], ['Melting Point', 'Dissociation Constants', 'pH']) - if second_layer_props: - second_layer_props = clean_property_data(second_layer_props) - output = {**output, **second_layer_props} - except Exception as E: - logging.error(f'various_utils.pubchem.pubchem_dap(). Some error during pubchem second level properties extraction (Melting Point) for {cas}', exc_info=True) - - return output diff --git a/old/_old/scraper_cosing.py b/old/_old/scraper_cosing.py deleted file mode 100644 index 2e6c60f..0000000 --- a/old/_old/scraper_cosing.py +++ /dev/null @@ -1,182 +0,0 @@ -import json as js -import re -import requests as req -from typing import Union - - -#region Funzione che processa una lista di CAS presa da Cosing (Grazie Jem) - -def parse_cas_numbers(cas_string:list) -> list: - - # Siccome ci assicuriamo esternamente che esista almeno un cas possiamo prendere la stringa - cas_string = cas_string[0] - - # Rimuoviamo parentesi e il loro contenuto - cas_string = re.sub(r"\([^)]*\)", "", cas_string) - - # Eseguiamo uno split su vari possibili separatori - cas_parts = re.split(r"[/;,]", cas_string) - - # Otteniamo una lista utilizzando i cas precedenti e rimuovendo spazi in eccesso - cas_list = [cas.strip() for cas in cas_parts] - - # Alcuni cas sono divisi da un doppio dash (--) che dobbiamo rimuovere - # è però necessario farlo ora in seconda battuta - - if 
len(cas_list) == 1 and "--" in cas_list[0]: - - cas_list = [cas.strip() for cas in cas_list[0].split("--")] - - # Siccome alcuni cas hanno valori non validi ("-") li cerchiamo e rimuoviamo - if cas_list: - - while "-" in cas_list: - cas_list.remove("-") - - return cas_list -#endregion - -#region Funzione per eseguire una ricerca direttamente sul cosing - -# Il primo argomento è la stringa da cercare, il secondo indica il tipo di ricerca -def cosing_search(text : str, mode : str = "name") -> Union[list,dict,None]: - cosing_post_req = "https://api.tech.ec.europa.eu/search-api/prod/rest/search?apiKey=285a77fd-1257-4271-8507-f0c6b2961203&text=*&pageSize=100&pageNumber=1" - agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0" - - # La modalità di ricerca base è quella per nome, che sia INCI o di altro tipo - if mode == "name": - search_query = {"bool": - {"must":[ - {"text": - {"query":f"{text}","fields": - ["inciName.exact","inciUsaName","innName.exact","phEurName","chemicalName","chemicalDescription"], - "defaultOperator":"AND"}}]}} - - # In caso di ricerca per numero cas o EC il payload della richiesta è diverso - elif mode in ["cas","ec"]: - search_query = {"bool": {"must": [{"text": {"query": f"*{text}*","fields": ["casNo", "ecNo"]}}]}} - - # La ricerca per ID è necessaria dove serva recuperare gli identified ingredients - elif mode == "id": - search_query = {"bool":{"must":[{"term":{"substanceId":f"{text}"}}]}} - - # Se la mode inserita non è prevista lancio un errore - else: - raise ValueError - - # Creo il payload della mia request - files = {"query": ("query",js.dumps(search_query),"application/json")} - - # Eseguo la post di ricerca - risposta = req.post(cosing_post_req,headers={"user_agent":agent,"Connection":"keep-alive"},files=files) - risposta = risposta.json() - if risposta["results"]: - - return risposta["results"][0]["metadata"] - - # La funzione ritorna None quando non 
ho risultati dalla mia ricerca - return risposta.status_code -#endregion - -#region Funzione per pulire un json cosing e restituirlo - -def clean_cosing(json : dict, full : bool = True) -> dict: - - # Definisco i campi di nostro interesse e li divido in base al tipo di output finale che desidero - - string_cols = ["itemType","nameOfCommonIngredientsGlossary","inciName","phEurName","chemicalName","innName","substanceId","refNo"] - list_cols = ["casNo","ecNo","functionName","otherRestrictions","sccsOpinion","sccsOpinionUrls","identifiedIngredient","annexNo","otherRegulations"] - - # Creo una lista con tutti i campi su cui ciclare - - total_keys = string_cols + list_cols - - # Definisco la base dell"url che mi serve per ottenere il link cosing della sostanza - - base_url = "https://ec.europa.eu/growth/tools-databases/cosing/details/" - clean_json = {} - - # Ciclo su tutti i campi di interesse - - for key in total_keys: - - # Alcuni campi contengono una dicitura inutile che occupa solo spazio - # per cui provvedo a rimuoverla - - while "" in json[key]: - json[key].remove("") - - # Se il campo dovrà avere in output una lista, posso anche accettare le liste vuote del cosing come valore - - if key in list_cols: - value = json[key] - - # Il cas e l"ec sono casi speciali, per cui eseguo delle funzioni in più quando lo tratto - - if key in ["casNo", "ecNo"]: - if value: - value = parse_cas_numbers(value) - - # Completiamo direttamente il json di ritorno ove presenti degli identifiedIngredient, - # solo dove il flag "full" è true - - elif key == "identifiedIngredient": - if full: - if value: - value = identified_ingredients(value) - - clean_json[key] = value - - else: - - # Questo nome di campo era troppo lungo e ho preferito semplificarlo - - if key == "nameOfCommonIngredientsGlossary": - nKey = "commonName" - - # Dovendo rinominare alcuni campi, negli altri casi copio il nome iniziale - - else: - nKey = key - - # Siccome voglio in output una stringa semplice e se usassi uno 
slicer su di una lista vuota - # devo prima verificare che la lista cosing contenga dei valori - - if json[key]: - clean_json[nKey] = json[key][0] - else: - clean_json[nKey] = "" - - # Il campo cosingUrl non esiste ancora, vado a crearlo unendo il substance ID all"url base - - clean_json["cosingUrl"] = f"{base_url}{json["substanceId"][0]}" - - return clean_json -#endregion - -#region Funzione per completare, se necessario, un json cosing - -def identified_ingredients(id_list : list) -> list: - - identified = [] - - # Per ognuno degli ingredienti in IdentifiedIngredients eseguo una ricerca - - for id in id_list: - - ingredient = cosing_search(id,"id") - - if ingredient: - - # Vado a pulire i json appena trovati - - ingredient = clean_cosing(ingredient,full=False) - - # Ora salvo nella lista il documento pulito - - identified.append(ingredient) - - # Finito di popolare la lista con gli oggetti degli identifiedIngredient, la ritorno - - return identified -#endregion \ No newline at end of file diff --git a/old/echa_find.py b/old/echa_find.py deleted file mode 100644 index 85b625f..0000000 --- a/old/echa_find.py +++ /dev/null @@ -1,223 +0,0 @@ -import requests -import urllib.parse -import re as standardre -import json -from bs4 import BeautifulSoup -from datetime import datetime -from pif_compiler.functions.common_log import get_logger - -logger = get_logger() - -# Funzione per cercare il dossier dato in input un CAS, una sostanza o un EN -def search_dossier(substance, input_type='rmlCas'): - results = {} - # Il dizionario che farò tornare alla fine - - # Prima parte. Ottengo rmlID e rmlName - # st.code('https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText='+substance) - req_0 = requests.get( - "https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText=" - + urllib.parse.quote(substance) #va convertito per il web - ) - - logger.info(f'echaFind.search_dossier(). 
searching "{substance}"') - - #'La prima cosa da fare è fare una ricerca con il nome della sostanza ma trasformata attraverso urllib' - req_0_json = req_0.json() - try: - # Estraggo i campi che mi servono dalla response - rmlId = req_0_json["items"][0]["substanceIndex"]["rmlId"] - rmlName = req_0_json["items"][0]["substanceIndex"]["rmlName"] - rmlCas = req_0_json["items"][0]["substanceIndex"]["rmlCas"] - rmlEc = req_0_json["items"][0]["substanceIndex"]["rmlEc"] - - results['search_response'] = f"https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText={urllib.parse.quote(substance)}" - results["rmlId"] = rmlId - results["rmlName"] = rmlName - results["rmlCas"] = rmlCas - results["rmlEc"] = rmlEc - - logger.info( - f"echaFind.search_dossier(). found substance on ECHA. rmlId: '{rmlId}', rmlName: '{rmlName}', rmlCas: '{rmlCas}'" - ) - except: - logger.info( - f"echaFind.search_dossier(). could not find substance for '{substance}'" - ) - return False - - # Update: in certi casi poteva verificarsi che inserendo un CAS si trovasse invece una sostanza con codice EN uguale al CAS in input. - # Ora controllo che la sostanza trovata abbia effettivamente un CAS uguale a quello inserito in input. - # è inoltre possibile cercare per rmlName (nome della sostanza) o EN (rmlEn): basta specificare in input_type per cosa si sta cercando - if results[input_type] != substance: - logger.error(f'echa.echaFind.search_dossier(): results[{input_type}] "{results[input_type]}is not equal to "{substance}". ') - return f'search_error. results[{input_type}] ("{results[input_type]}") is not equal to "{substance}". Maybe you specified the wrong input_type. Check the results here: https://chem.echa.europa.eu/api-substance/v1/substance?pageIndex=1&pageSize=100&searchText={urllib.parse.quote(substance)}' - - # Seconda parte. Cerco sul sito ECHA dei dossiers creando un link con l'ID precedentemente ottenuto. 
- req_1_url = ( - "https://chem.echa.europa.eu/api-dossier-list/v1/dossier?pageIndex=1&pageSize=100&rmlId=" - + rmlId - + "®istrationStatuses=Active" - ) # Prima cerco negli active. - - req_1 = requests.get(req_1_url) - req_1_json = req_1.json() - - # Se non esistono dossiers attivi cerco quelli inattivi - if req_1_json["items"] == []: - logger.info( - f"echaFind.search_dossier(). could not find active dossier for '{substance}'. Proceeding to search in the unactive ones." - ) - req_1_url = ( - "https://chem.echa.europa.eu/api-dossier-list/v1/dossier?pageIndex=1&pageSize=100&rmlId=" - + rmlId - + "®istrationStatuses=Inactive" - ) - req_1 = requests.get(req_1_url) - req_1_json = req_1.json() - if req_1_json["items"] == []: - logger.info( - f"echaFind.search_dossier(). could not find unactive dossiers for '{substance}'" - ) # Non ho trovato nè dossiers inattivi che attivi - return False - else: - logger.info( - f"echaFind.search_dossier(). found unactive dossiers for '{rmlName}'" - ) - results["dossierType"] = "Inactive" - - else: - logger.info( - f"echaFind.search_dossier(). found active dossiers for '{substance}'" - ) - results["dossierType"] = "Active" - - # Queste erano le due robe che mi servivano - assetExternalId = req_1_json["items"][0]["assetExternalId"] - - # UPDATE: Per ottenere la data dell'ultima modifica: serve per capire se abbiamo già dei file aggiornati scaricati in locale - # confrontare data di scraping e ultimo aggiornato (se prima o dopo) - - try: - lastUpdateDate = req_1_json["items"][0]["lastUpdatedDate"] - datetime_object = datetime.fromisoformat(lastUpdateDate.replace('Z', '+00:00')) # Handle 'Z' if present, else it might break on older python versions - lastUpdateDate = datetime_object.date().isoformat() - results['lastUpdateDate'] = lastUpdateDate - except: - logger.error(f"echa.echaFind(). Could not find lastUpdateDate for the dossier") - - rootKey = req_1_json["items"][0]["rootKey"] - - # PARTE DI HTML - - # Terza parte. 
Ottengo assetExternalId - # "Con l'assetExternalId è possibile arrivare alla pagina principale del dossier." - # "Da questa pagina bisogna scrappare l'ID del riassunto tossicologico, :red[**SE ESISTE**]" - results["index"] = ( - "https://chem.echa.europa.eu/html-pages" + assetExternalId + "/index.html" - ) - results["index_js"] = ( - f"https://chem.echa.europa.eu/{rmlId}/dossier-view/{assetExternalId}" - ) - - req_2 = requests.get( - "https://chem.echa.europa.eu/html-pages/" + assetExternalId + "/index.html" - ) - index = BeautifulSoup(req_2.text, "html.parser") - index.prettify() - - # Quarta parte. Ottengo l'ID del riassunto tossicologico dall'index.html - # "In tutto quell'HTML ci interessa solo un div. BeautifulSoup ha problemi se ci sono troppi div innestati. Quindi uso una combinazione di quello e di Regex" - - div = index.find_all("div", id=["id_7_Toxicologicalinformation"]) - str_div = str(div) - str_div = str_div.split("")[0] - - # UIC è l'id del toxsummary - uic_found = False - if type(standardre.search('href="([^"]+)"', str_div)).__name__ == "NoneType": - # Un regex per trovare l'href che mi serve - logger.info( - f"echaFind.search_dossier(). Could not find 'id_7_Toxicologicalinformation' in the body" - ) - else: - UIC = standardre.search('href="([^"]+)"', str_div).group(1) - uic_found = True - - # Per l'acute toxicity - acute_toxicity_found = False - div_acute_toxicity = index.find_all("div", id=["id_72_AcuteToxicity"]) - if div_acute_toxicity: - for div in div_acute_toxicity: - try: - a = div.find_all("a", href=True)[0] - acute_toxicity_id = standardre.search('href="([^"]+)"', str(a)).group(1) - acute_toxicity_found = True - except: - logger.info( - f"echaFind.search_dossier(). 
No acute_toxicity_id found from index for {substance}" - ) - - # Per il repeated dose - repeated_dose_found = False - div_repeated_dose = index.find_all("div", id=["id_75_Repeateddosetoxicity"]) - if div_repeated_dose: - for div in div_repeated_dose: - try: - a = div.find_all("a", href=True)[0] - repeated_dose_id = standardre.search('href="([^"]+)"', str(a)).group(1) - repeated_dose_found = True - except: - logger.info( - f"echaFind.search_dossier(). No repeated_dose_id found from index for {substance}" - ) - - # Quinta parte. Recupero l'html del dossier tossicologico e faccio ritornare il content - - if acute_toxicity_found: - acute_toxicity_link = ( - "https://chem.echa.europa.eu/html-pages/" - + assetExternalId - + "/documents/" - + acute_toxicity_id - + ".html" - ) - results["AcuteToxicity"] = acute_toxicity_link - # ci sono due link diversi: uno solo html brutto ma che ha le info leggibile, mentre js è la versione più bella presentata all'utente, - # usata per creare il pdf carino - results["AcuteToxicity_js"] = ( - f"https://chem.echa.europa.eu/{rmlId}/dossier-view/{assetExternalId}/{acute_toxicity_id}" - ) - - if uic_found: - # UIC è l'id del toxsummary - final_url = ( - "https://chem.echa.europa.eu/html-pages/" - + assetExternalId - + "/documents/" - + UIC - + ".html" - ) - results["ToxSummary"] = final_url - results["ToxSummary_js"] = ( - f"https://chem.echa.europa.eu/{rmlId}/dossier-view/{assetExternalId}/{UIC}" - ) - - if repeated_dose_found: - results["RepeatedDose"] = ( - "https://chem.echa.europa.eu/html-pages/" - + assetExternalId - + "/documents/" - + repeated_dose_id - + ".html" - ) - results["RepeatedDose_js"] = ( - f"https://chem.echa.europa.eu/{rmlId}/dossier-view/{assetExternalId}/{repeated_dose_id}" - ) - - json_formatted_str = json.dumps(results) - logger.info(f"echaFind.search_dossier() OK. 
output: {json_formatted_str}") - return results - -if __name__ == "__main__": - search_dossier("100-41-4", input_type='rmlCas') \ No newline at end of file diff --git a/old/echa_pdf.py b/old/echa_pdf.py deleted file mode 100644 index de1342b..0000000 --- a/old/echa_pdf.py +++ /dev/null @@ -1,477 +0,0 @@ -import os -import base64 -import traceback -import logging # Import logging module -import datetime -import pandas as pd -# import time # Keep if you use page.wait_for_timeout -from playwright.sync_api import sync_playwright, TimeoutError # Catch specific errors -from pif_compiler.services.echa_find import search_dossier -import requests - -# --- Basic Logging Setup (Commented Out) --- -# # Configure logging - uncomment and customize level/handler as needed -# logging.basicConfig( -# level=logging.INFO, # Or DEBUG for more details -# format='%(asctime)s - %(levelname)s - %(message)s', -# # filename='pdf_generator.log', # Optional: Log to a file -# # filemode='a' -# ) -# --- End Logging Setup --- - - -# Assume svg_to_data_uri is defined elsewhere correctly -def svg_to_data_uri(svg_path): - try: - if not os.path.exists(svg_path): - # logging.error(f"SVG file not found: {svg_path}") # Example logging - raise FileNotFoundError(f"SVG file not found: {svg_path}") - with open(svg_path, 'rb') as f: - svg_content = f.read() - encoded_svg = base64.b64encode(svg_content).decode('utf-8') - return f"data:image/svg+xml;base64,{encoded_svg}" - except Exception as e: - print(f"Error converting SVG {svg_path}: {e}") - # logging.error(f"Error converting SVG {svg_path}: {e}", exc_info=True) # Example logging - return None - -# --- JavaScript Expressions --- - -# Define the cleanup logic as an immediately-invoked arrow function expression -# NOTE: .das-block_empty removal is currently disabled as per previous step -cleanup_js_expression = """ -() => { - console.log('Running cleanup JS (DISABLED .das-block_empty removal)...'); - let totalRemoved = 0; - - // Example 1: Remove sections 
explicitly marked as empty (Currently Disabled) - // const emptyBlocks = document.querySelectorAll('.das-block_empty'); - // emptyBlocks.forEach(el => { - // if (el && el.parentNode) { - // console.log(`Removing '.das-block_empty' block with ID: ${el.id || 'N/A'}`); - // el.remove(); - // totalRemoved++; - // } - // }); - - // Add other specific cleanup logic here if needed - - console.log(`Cleanup script removed ${totalRemoved} elements (DISABLED .das-block_empty removal).`); - return totalRemoved; // Return the count -} -""" -# --- End JavaScript Expressions --- - - -def generate_pdf_with_header_and_cleanup( - url, - pdf_path, - substance_name, - substance_link, - ec_number, - cas_number, - header_template_path=r"src\func\resources\injectableHeader.html", - echa_chem_logo_path=r"src\func\resources\echa_chem_logo.svg", - echa_logo_path=r"src\func\resources\ECHA_Logo.svg" -) -> bool: # Added return type hint - """ - Generates a PDF with a dynamic header and optionally removes empty sections. - Provides basic logging (commented out) and returns True/False for success/failure. - - Args: - url (str): The target URL OR local HTML file path. - pdf_path (str): The output PDF path. - substance_name (str): The name of the chemical substance. - substance_link (str): The URL the substance name should link to (in header). - ec_number (str): The EC number for the substance. - cas_number (str): The CAS number for the substance. - header_template_path (str): Path to the HTML header template file. - echa_chem_logo_path (str): Path to the echa_chem_logo.svg file. - echa_logo_path (str): Path to the ECHA_Logo.svg file. - - Returns: - bool: True if the PDF was generated successfully, False otherwise. - """ - final_header_html = None - # logging.info(f"Starting PDF generation for URL: {url} to path: {pdf_path}") # Example logging - - # --- 1. 
Prepare Header HTML --- - try: - # logging.debug(f"Reading header template from: {header_template_path}") # Example logging - print(f"Reading header template from: {header_template_path}") - if not os.path.exists(header_template_path): - raise FileNotFoundError(f"Header template file not found: {header_template_path}") - with open(header_template_path, 'r', encoding='utf-8') as f: - header_template_content = f.read() - if not header_template_content: - raise ValueError("Header template file is empty.") - - # logging.debug("Converting logos...") # Example logging - print("Converting logos...") - logo1_data_uri = svg_to_data_uri(echa_chem_logo_path) - logo2_data_uri = svg_to_data_uri(echa_logo_path) - if not logo1_data_uri or not logo2_data_uri: - raise ValueError("Failed to convert one or both logos to Data URIs.") - - # logging.debug("Replacing placeholders...") # Example logging - print("Replacing placeholders...") - final_header_html = header_template_content.replace("##ECHA_CHEM_LOGO_SRC##", logo1_data_uri) - final_header_html = final_header_html.replace("##ECHA_LOGO_SRC##", logo2_data_uri) - final_header_html = final_header_html.replace("##SUBSTANCE_NAME##", substance_name) - final_header_html = final_header_html.replace("##SUBSTANCE_LINK##", substance_link) - final_header_html = final_header_html.replace("##EC_NUMBER##", ec_number) - final_header_html = final_header_html.replace("##CAS_NUMBER##", cas_number) - - if "##" in final_header_html: - print("Warning: Not all placeholders seem replaced in the header HTML.") - # logging.warning("Not all placeholders seem replaced in the header HTML.") # Example logging - - except Exception as e: - print(f"Error during header setup phase: {e}") - traceback.print_exc() - # logging.error(f"Error during header setup phase: {e}", exc_info=True) # Example logging - return False # Return False on header setup failure - # --- End Header Prep --- - - # --- CSS Override Definition --- - # Using Revision 4 from previous step 
(simplified breaks, boundary focus) - selectors_to_fix = [ - '.das-field .das-field_value_html', - '.das-field .das-field_value_large', - '.das-field .das-value_remark-text' - ] - css_selector_string = ",\n".join(selectors_to_fix) - css_override = f""" - -""" - # --- End CSS Override Definition --- - - # --- Playwright Automation --- - try: - with sync_playwright() as p: - # logging.debug("Launching browser...") # Example logging - # browser = p.chromium.launch(headless=False, devtools=True) # For debugging - browser = p.chromium.launch() - page = browser.new_page() - # Capture console messages (Corrected: use msg.text) - page.on("console", lambda msg: print(f"Browser Console: {msg.text}")) - - try: - # logging.info(f"Navigating to page: {url}") # Example logging - print(f"Navigating to: {url}") - if os.path.exists(url) and not url.startswith('file://'): - page_url = f'file://{os.path.abspath(url)}' - # logging.info(f"Treating as local file: {page_url}") # Example logging - print(f"Treating as local file: {page_url}") - else: - page_url = url - - page.goto(page_url, wait_until='load', timeout=90000) - # logging.info("Page navigation complete.") # Example logging - - # logging.debug("Injecting header HTML...") # Example logging - print("Injecting header HTML...") - page.evaluate(f'(headerHtml) => {{ document.body.insertAdjacentHTML("afterbegin", headerHtml); }}', final_header_html) - - # logging.debug("Injecting CSS overrides...") # Example logging - print("Injecting CSS overrides...") - page.evaluate(f"""(css) => {{ - const existingStyle = document.getElementById('pdf-override-styles'); - if (existingStyle) existingStyle.remove(); - document.head.insertAdjacentHTML('beforeend', css); - }}""", css_override) - - # logging.debug("Running JavaScript cleanup function...") # Example logging - print("Running JavaScript cleanup function...") - elements_removed_count = page.evaluate(cleanup_js_expression) - # logging.info(f"Cleanup script finished (reported removing 
{elements_removed_count} elements).") # Example logging - print(f"Cleanup script finished (reported removing {elements_removed_count} elements).") - - - # --- Optional: Emulate Print Media --- - # print("Emulating print media...") - # page.emulate_media(media='print') - - # --- Generate PDF --- - # logging.info(f"Generating PDF: {pdf_path}") # Example logging - print(f"Generating PDF: {pdf_path}") - pdf_options = { - "path": pdf_path, "format": "A4", "print_background": True, - "margin": {'top': '20px', 'bottom': '20px', 'left': '20px', 'right': '20px'}, - "scale": 1.0 - } - page.pdf(**pdf_options) - # logging.info(f"PDF saved successfully to: {pdf_path}") # Example logging - print(f"PDF saved successfully to: {pdf_path}") - - # logging.debug("Closing browser.") # Example logging - print("Closing browser.") - browser.close() - return True # Indicate success - - except TimeoutError as e: - print(f"A Playwright TimeoutError occurred: {e}") - traceback.print_exc() - # logging.error(f"Playwright TimeoutError occurred: {e}", exc_info=True) # Example logging - browser.close() # Ensure browser is closed on error - return False # Indicate failure - except Exception as e: # Catch other potential errors during Playwright page operations - print(f"An unexpected error occurred during Playwright page operations: {e}") - traceback.print_exc() - # logging.error(f"Unexpected error during Playwright page operations: {e}", exc_info=True) # Example logging - # Optional: Save HTML state on error - try: - html_content = page.content() - error_html_path = pdf_path.replace('.pdf', '_error.html') - with open(error_html_path, 'w', encoding='utf-8') as f_err: - f_err.write(html_content) - # logging.info(f"Saved HTML state on error to: {error_html_path}") # Example logging - print(f"Saved HTML state on error to: {error_html_path}") - except Exception as save_e: - # logging.error(f"Could not save HTML state on error: {save_e}", exc_info=True) # Example logging - print(f"Could not save HTML 
state on error: {save_e}") - browser.close() # Ensure browser is closed on error - return False # Indicate failure - # Note: The finally block for the 'with sync_playwright()' context - # is handled automatically by the 'with' statement. - - except Exception as e: - # Catch errors during Playwright startup (less common) - print(f"An error occurred during Playwright setup/teardown: {e}") - traceback.print_exc() - # logging.error(f"Error during Playwright setup/teardown: {e}", exc_info=True) # Example logging - return False # Indicate failure - - -# --- Example Usage --- -# result = generate_pdf_with_header_and_cleanup( -# url='path/to/your/input.html', -# pdf_path='output.pdf', -# substance_name='Glycerol Example', -# substance_link='http://example.com/glycerol', -# ec_number='200-289-5', -# cas_number='56-81-5', -# ) -# -# if result: -# print("PDF Generation Succeeded.") -# # logging.info("Main script: PDF Generation Succeeded.") # Example logging -# else: -# print("PDF Generation Failed.") -# # logging.error("Main script: PDF Generation Failed.") # Example logging - - -def search_generate_pdfs( - cas_number_to_search: str, - page_types_to_extract: list[str], - base_output_folder: str = "data/library" -) -> bool: - """ - Searches for a substance by CAS, saves raw HTML and generates PDFs for - specified page types. Uses '_js' link variant for the PDF header link if available. - - Args: - cas_number_to_search (str): CAS number to search for. - page_types_to_extract (list[str]): List of page type names (e.g., 'RepeatedDose'). - Expects '{page_type}' and '{page_type}_js' keys - in the search result. - base_output_folder (str): Root directory for saving HTML/PDFs. - - Returns: - bool: True if substance found and >=1 requested PDF generated, False otherwise. - """ - # logging.info(f"Starting process for CAS: {cas_number_to_search}") - print(f"\n===== Processing request for CAS: {cas_number_to_search} =====") - - # --- 1. 
Search for Dossier Information --- - try: - # logging.debug(f"Calling search_dossier for CAS: {cas_number_to_search}") - search_result = search_dossier(substance=cas_number_to_search, input_type='rmlCas') - except Exception as e: - print(f"Error during dossier search for CAS '{cas_number_to_search}': {e}") - traceback.print_exc() - # logging.error(f"Exception during search_dossier for CAS '{cas_number_to_search}': {e}", exc_info=True) - return False - - if not search_result: - print(f"Substance not found or search failed for CAS: {cas_number_to_search}") - # logging.warning(f"Substance not found or search failed for CAS: {cas_number_to_search}") - return False - - # logging.info(f"Substance found for CAS: {cas_number_to_search}") - print(f"Substance found: {search_result.get('rmlName', 'N/A')}") - - # --- 2. Extract Details and Filter Pages --- - try: - # Extract required info - rml_id = search_result.get('rmlId') - rml_name = search_result.get('rmlName') - rml_cas = search_result.get('rmlCas') - rml_ec = search_result.get('rmlEc') - asset_ext_id = search_result.get('assetExternalId') - - # Basic validation - if not all([rml_id, rml_name, rml_cas, rml_ec, asset_ext_id]): - missing_keys = [k for k, v in {'rmlId': rml_id, 'rmlName': rml_name, 'rmlCas': rml_cas, 'rmlEc': rml_ec, 'assetExternalId': asset_ext_id}.items() if not v] - message = f"Search result for {cas_number_to_search} is missing required keys: {missing_keys}" - print(f"Error: {message}") - # logging.error(message) - return False - - # --- Filtering Logic - Collect pairs of URLs --- - pages_to_process_list = [] # Store tuples: (page_name, raw_url, js_url) - # logging.debug(f"Filtering pages. 
Requested: {page_types_to_extract}.") - - for page_type in page_types_to_extract: - raw_url_key = page_type - js_url_key = f"{page_type}_js" - - raw_url = search_result.get(raw_url_key) - js_url = search_result.get(js_url_key) # Get the JS URL - - # Check if both URLs are valid strings - if raw_url and isinstance(raw_url, str) and raw_url.strip(): - if js_url and isinstance(js_url, str) and js_url.strip(): - pages_to_process_list.append((page_type, raw_url, js_url)) - # logging.debug(f"Found valid pair for '{page_type}': Raw='{raw_url}', JS='{js_url}'") - else: - # Found raw URL but not a valid JS URL - skip this page type for PDF? - # Or use raw_url for header too? Let's skip if JS URL is missing/invalid. - print(f"Found raw URL for '{page_type}' but missing/invalid JS URL ('{js_url}'). Skipping PDF generation for this type.") - # logging.warning(f"Missing/invalid JS URL for page type '{page_type}' for {rml_cas}. Raw URL: '{raw_url}'.") - else: - # Raw URL missing or invalid - if page_type in search_result: # Check if key existed at all - print(f"Found page type key '{page_type}' for {rml_cas}, but its value is not a valid URL ('{raw_url}'). 
Skipping.") - # logging.warning(f"Invalid raw URL value for page type '{page_type}' for {rml_cas}: '{raw_url}'.") - else: - print(f"Requested page type key '{page_type}' not found in search results for {rml_cas}.") - # logging.warning(f"Requested page type key '{page_type}' not found for {rml_cas}.") - # --- End Filtering Logic --- - - if not pages_to_process_list: - print(f"After filtering, no requested page types ({page_types_to_extract}) resulted in a valid pair of Raw and JS URLs for substance {rml_cas}.") - # logging.warning(f"No pages with valid URL pairs to process for substance {rml_cas}.") - return False # Nothing to generate - - except Exception as e: - print(f"Error processing search result for '{cas_number_to_search}': {e}") - traceback.print_exc() - # logging.error(f"Error processing search result for '{cas_number_to_search}': {e}", exc_info=True) - return False - - # --- 3. Prepare Folders --- - safe_cas = rml_cas.replace('/', '_').replace('\\', '_') - substance_folder_name = f"{safe_cas}_{rml_ec}_{rml_id}" - substance_folder_path = os.path.join(base_output_folder, substance_folder_name) - - try: - os.makedirs(substance_folder_path, exist_ok=True) - # logging.info(f"Ensured output directory exists: {substance_folder_path}") - print(f"Ensured output directory exists: {substance_folder_path}") - except OSError as e: - print(f"Error creating directory {substance_folder_path}: {e}") - # logging.error(f"Failed to create directory {substance_folder_path}: {e}", exc_info=True) - return False - - - # --- 4. 
Process Each Page (Save HTML, Generate PDF) --- - successful_pages = [] # Track successful PDF generations - overall_success = False # Track if any PDF was generated - - for page_name, raw_html_url, js_header_link in pages_to_process_list: - print(f"\nProcessing page: {page_name}") - base_filename = f"{safe_cas}_{page_name}" - html_filename = f"{base_filename}.html" - pdf_filename = f"{base_filename}.pdf" - html_full_path = os.path.join(substance_folder_path, html_filename) - pdf_full_path = os.path.join(substance_folder_path, pdf_filename) - - # --- Save Raw HTML --- - html_saved = False - try: - # logging.debug(f"Fetching raw HTML for {page_name} from {raw_html_url}") - print(f"Fetching raw HTML from: {raw_html_url}") - # Add headers to mimic a browser slightly if needed - headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'} - response = requests.get(raw_html_url, timeout=30, headers=headers) # 30s timeout - response.raise_for_status() # Raise an exception for bad status codes (4xx or 5xx) - - # Decide encoding - response.text tries to guess, or use apparent_encoding - # Or assume utf-8 if unsure, which is common. 
- html_content = response.content.decode('utf-8', errors='replace') - - with open(html_full_path, 'w', encoding='utf-8') as f: - f.write(html_content) - html_saved = True - # logging.info(f"Successfully saved raw HTML for {page_name} to {html_full_path}") - print(f"Successfully saved raw HTML to: {html_full_path}") - except requests.exceptions.RequestException as req_e: - print(f"Error fetching raw HTML for {page_name} from {raw_html_url}: {req_e}") - # logging.error(f"Error fetching raw HTML for {page_name}: {req_e}", exc_info=True) - except IOError as io_e: - print(f"Error saving raw HTML for {page_name} to {html_full_path}: {io_e}") - # logging.error(f"Error saving raw HTML for {page_name}: {io_e}", exc_info=True) - except Exception as e: # Catch other potential errors like decoding - print(f"Unexpected error saving HTML for {page_name}: {e}") - # logging.error(f"Unexpected error saving HTML for {page_name}: {e}", exc_info=True) - - # --- Generate PDF (using raw URL for content, JS URL for header link) --- - # logging.info(f"Generating PDF for {page_name} from {raw_html_url}") - print(f"Generating PDF using content from: {raw_html_url}") - pdf_success = generate_pdf_with_header_and_cleanup( - url=raw_html_url, # Use raw URL for Playwright navigation/content - pdf_path=pdf_full_path, - substance_name=rml_name, - substance_link=js_header_link, # Use JS URL for the link in the header - ec_number=rml_ec, - cas_number=rml_cas - ) - - if pdf_success: - successful_pages.append(page_name) # Log success based on PDF generation - overall_success = True - # logging.info(f"Successfully generated PDF for {page_name} at {pdf_full_path}") - print(f"Successfully generated PDF for {page_name}") - else: - # logging.error(f"Failed to generate PDF for {page_name} from {raw_html_url}") - print(f"Failed to generate PDF for {page_name}") - # Decide if failure to save HTML should affect overall success or logging... - # Currently, success is tied only to PDF generation. 
- - print(f"===== Finished request for CAS: {cas_number_to_search} =====") - print(f"Successfully generated {len(successful_pages)} PDFs: {successful_pages}") - return overall_success # Return success based on PDF generation - -from playwright.sync_api import sync_playwright - -with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - page.goto("https://chem.echa.europa.eu/html-pages-prod/e4c88c6e-06c7-4daa-b0fb-1a55459ac22f/documents/IUC5-5f55d8ec-7a71-4e2c-9955-8469ead9fe84_0035f3f8-7467-4944-9028-1db2e9c99565.html") - page.pdf(path='output.pdf') - browser.close() - diff --git a/old/echa_process.py b/old/echa_process.py deleted file mode 100644 index fbac2fe..0000000 --- a/old/echa_process.py +++ /dev/null @@ -1,947 +0,0 @@ -from pif_compiler.services.echa_find import search_dossier -from bs4 import BeautifulSoup -from markdownify import MarkdownConverter -import pandas as pd -import requests -import os -import re -import markdown_to_json -import json -import copy -import unicodedata -from datetime import datetime -import logging -import duckdb - -# Settings per il logging -logging.basicConfig( - format="{asctime} - {levelname} - {message}", - style="{", - datefmt="%Y-%m-%d %H:%M", - filename="echa.log", - encoding="utf-8", - filemode="a", - level=logging.INFO, -) - -try: - # Carico il full scraping in memoria se esiste - con = duckdb.connect() - os.chdir(".") # directory che legge python - res = con.sql(""" - CREATE TABLE echa_full_scraping AS - SELECT * FROM read_csv_auto('src\data\echa_full_scraping.csv'); - """) # leggi il file csv come db in memory - logging.info( - f"echa.echaProcess().main: Loaded echa scraped data into duckdb memory. 
First CAS in the df is: {con.sql('select CAS from echa_full_scraping limit 1').fetchone()[0]}" - ) - local_echa = True -except: - logging.error(f"echa.echaProcess().main: No local echa scraped data found") - - -# Metodo per trovare le informazioni relative sul sito echa -# Funziona sia con il nome della sostanza che con il CUS -def openEchaPage(link, local=False): - try: - if local: - page = open(link, encoding="utf8") - soup = BeautifulSoup(page, "html.parser") - else: - page = requests.get(link) - page.encoding = "utf-8" - soup = BeautifulSoup(page.text, "html.parser") - except: - logging.error( - f"echa.echaProcess.openEchaPage() error. could not open: '{link}'", - exc_info=True, - ) - return soup - - -# Metodo per trasformare la pagina dell'echa in un Markdown -def echaPage_to_md(sezione, scrapingType=None, local=False, substance=None): - # sezione : il soup della pagina estratta attraverso search_dossier - # scrapingType : 'RepeatedDose' o 'AcuteToxicity' - # local : se vuoi salvare il contenuto del markdown in locale. Utile per debuggare - # substance : il nome della sostanza. Per salvarla nel path corretto - - # Create shorthand method for conversion - def md(soup, **options): - return MarkdownConverter(**options).convert_soup(soup) - - output = md(sezione) - # Trasformo la section html in un markdown, che però va corretto. 
- - # Cambia un po' il modo in cui modifico il .md in base al tipo di pagina da scrappare - # aggiungo eccezioni man mano che testo nuove sostanze - if scrapingType == "RepeatedDose": - output = output.replace("### Oral route", "#### oral") - output = output.replace("### Dermal", "#### dermal") - output = output.replace("### Inhalation", "#### inhalation") - # Devo rimpiazzare >< con delle stringhe perchè sennò il jsonifier interpreta quei due simboli come dei codici e inserisce il testo nelle [] - output = re.sub(r">\s+", "greater than ", output) - # Replace '<' followed by whitespace with 'less than ' - output = re.sub(r"<\s+", "less than ", output) - output = re.sub(r">=\s*\n", "greater or equal than ", output) - output = re.sub(r"<=\s*\n", "less or equal than ", output) - - elif scrapingType == "AcuteToxicity": - # Devo rimpiazzare >< con delle stringhe perchè sennò il jsonifier interpreta quei due simboli come dei codici e inserisce il testo nelle [] - output = re.sub(r">\s+", "greater than ", output) - # Replace '<' followed by whitespace with 'less than ' - output = re.sub(r"<\s+", "less than ", output) - output = re.sub(r">=\s*\n", "greater or equal than", output) - output = re.sub(r"<=\s*\n", "less or equal than ", output) - - output = output.replace("–", "-") - - output = re.sub(r"\s+mg", " mg", output) - # sta parte serve per fixare le unità di misura che vanno a capo e sono separate dal valore - - if local and substance: - path = f"{scrapingType}/mds/{substance}.md" - os.makedirs(os.path.dirname(path), exist_ok=True) - with open(path, "w") as text_file: - text_file.write(output) - - return output - - - -# Questo è la parte 2 del processing del sito ECHA. Va trasformato il markdown in un JSON -def markdown_to_json_raw(output, scrapingType=None, local=False, substance=None): - # Output: Il markdown - # scrapingType : 'RepeatedDose' o 'AcuteToxicity' - # substance : il nome della sostanza. 
Per salvarla nel path corretto - jsonified = markdown_to_json.jsonify(output) - dictified = json.loads(jsonified) - - # Salvo il json iniziale così come esce da jsonify - if local and scrapingType and substance: - path = f"{scrapingType}/jsons/raws/{substance}_raw0.json" - os.makedirs(os.path.dirname(path), exist_ok=True) - - with open(path, "w") as text_file: - text_file.write(jsonified) - - # Ora splitto i contenuti dei dizionari innestati. - for key, value in dictified.items(): - if type(value) == dict: - for key2, value2 in value.items(): - parts = value2.split("\n\n") - dictified[key][key2] = { - parts[i]: parts[i + 1] - for i in range(0, len(parts) - 1, 2) - if parts[i + 1] != "[Empty]" - } - else: - parts = value.split("\n\n") - dictified[key] = { - parts[i]: parts[i + 1] - for i in range(0, len(parts) - 1, 2) - if parts[i + 1] != "[Empty]" - } - - jsonified = json.dumps(dictified) - - if local and scrapingType and substance: - path = f"{scrapingType}/jsons/raws/{substance}_raw1.json" - os.makedirs(os.path.dirname(path), exist_ok=True) - - with open(path, "w") as text_file: - text_file.write(jsonified) - - dictified = json.loads(jsonified) - - return jsonified - - -# Metodo creato da claude per risolvere i problemi di unicode characters -def normalize_unicode_characters(text): - """ - Normalize Unicode characters, with special handling for superscript - """ - if not isinstance(text, str): - return text - - # Specific replacements for common Unicode encoding issues - # e per altre eccezioni particolari - replacements = { - "\u00c2\u00b2": "²", # ² -> ² - "\u00c2\u00b3": "³", # ³ -> ³ - "\u00b2": "²", # Bare superscript 2 - "\u00b3": "³", # Bare superscript 3 - "\n": "", # ogni tanto ci sono degli \n brutti da togliere - "greater than": ">", - "less than": "<", - "greater or equal than": ">=", - "less or equal than": "<", - # Ste due entry le ho messe io. 
>< creano problemi quindi le rinonimo temporaneamente - } - - # Apply specific replacements first - for old, new in replacements.items(): - text = text.replace(old, new) - - # Normalize Unicode characters - text = unicodedata.normalize("NFKD", text) - - return text - - -# Un'altro metodo creato da Claude. -# Pare che il mio cervello sia troppo piccolo per riuscire a ciclare ricursivamente -# un dizionario innestato. Se magari avessimo fatto algoritmi e strutture dati... -def clean_json(data): - """ - Recursively clean JSON by removing empty/uninformative entries - and normalizing Unicode characters - """ - - def is_uninformative(value, context=None): - """ - Check if a dictionary entry is considered uninformative - - Args: - value: The value to check - context: Additional context about where the value is located - """ - # Specific exceptions - if context and context == "Key value for chemical safety assessment": - # Always keep all entries in this specific section - return False - - uninformative_values = ["hours/week", "", None] - - return value in uninformative_values or ( - isinstance(value, str) - and ( - value.strip() in uninformative_values - or value.lower() == "no information available" - ) - ) - - def clean_recursive(obj, context=None): - # If it's a dictionary, process its contents - if isinstance(obj, dict): - # Create a copy to modify - cleaned = {} - for key, value in obj.items(): - # Normalize key - normalized_key = normalize_unicode_characters(key) - - # Set context for nested dictionaries - new_context = context or normalized_key - - # Recursively clean nested structures - cleaned_value = clean_recursive(value, new_context) - - # Conditions for keeping the entry - keep_entry = ( - cleaned_value not in [None, {}, ""] - and not ( - isinstance(cleaned_value, dict) and len(cleaned_value) == 0 - ) - and not is_uninformative(cleaned_value, new_context) - ) - - # Add to cleaned dict if conditions are met - if keep_entry: - cleaned[normalized_key] = 
cleaned_value - - return cleaned if cleaned else None - - # If it's a list, clean each item - elif isinstance(obj, list): - cleaned_list = [clean_recursive(item, context) for item in obj] - cleaned_list = [item for item in cleaned_list if item not in [None, {}, ""]] - return cleaned_list if cleaned_list else None - - # For strings, normalize Unicode - elif isinstance(obj, str): - return normalize_unicode_characters(obj) - - # Return as-is for other types - return obj - - # Create a deep copy to avoid modifying original data - cleaned_data = clean_recursive(copy.deepcopy(data)) - # Sì figa questa è la parte che mi ha fatto sclerare - # Ciclare in dizionari innestati senza poter modificare la struttura - return cleaned_data - - -def json_to_dataframe(cleaned_json, scrapingType): - rows = [] - schema = { - "RepeatedDose": [ - "Substance", - "CAS", - "Toxicity Type", - "Route", - "Dose descriptor", - "Effect level", - "Species", - "Extraction_Timestamp", - "Endpoint conclusion", - ], - "AcuteToxicity": [ - "Substance", - "CAS", - "Route", - "Endpoint conclusion", - "Dose descriptor", - "Effect level", - "Extraction_Timestamp", - ], - } - if scrapingType == "RepeatedDose": - # Iterate through top-level sections (excluding 'Key value for chemical safety assessment') - for toxicity_type, routes in cleaned_json.items(): - if toxicity_type == "Key value for chemical safety assessment": - continue - - # Iterate through routes within each toxicity type - for route, details in routes.items(): - row = {"Toxicity Type": toxicity_type, "Route": route} - - # Add details to the row, excluding 'Link to relevant study record(s)' - row.update( - { - k: v - for k, v in details.items() - if k != "Link to relevant study record(s)" - } - ) - rows.append(row) - elif scrapingType == "AcuteToxicity": - for toxicity_type, routes in cleaned_json.items(): - if ( - toxicity_type == "Key value for chemical safety assessment" - or not routes - ): - continue - - row = { - "Route": 
toxicity_type.replace("Acute toxicity: via", "") - .replace("route", "") - .strip() - } - - # Add details directly from the routes dictionary - row.update( - { - k: v - for k, v in routes.items() - if k != "Link to relevant study record(s)" - } - ) - rows.append(row) - - # Create DataFrame - df = pd.DataFrame(rows) - - # Last moment fixes. Per forzare uno schema - fair_columns = list(set(schema["RepeatedDose"] + schema["AcuteToxicity"])) - df = df = df.loc[:, df.columns.intersection(fair_columns)] - return df - - -def save_dataframe(df, file_path, scrapingType, schema): - """ - Save DataFrame with strict column requirements. - - Args: - df (pd.DataFrame): DataFrame to potentially append - file_path (str): Path of CSV file - """ - # Mandatory columns for saved DataFrame - - saved_columns = schema[scrapingType] - - # Check if input DataFrame has at least Dose Descriptor and Effect Level - if not all(col in df.columns for col in ["Effect level"]): - return - - # If file exists, read it to get saved columns - if os.path.exists(file_path): - existing_df = pd.read_csv(file_path) - - # Reindex to match saved columns, filling missing with NaN - df = df.reindex(columns=saved_columns) - else: - # If file doesn't exist, create DataFrame with saved columns - df = df.reindex(columns=saved_columns) - - df = df[df["Effect level"].isna() == False] - # Ignoro le righe che non hanno valori per Effect Level - - # Append or save the DataFrame - df.to_csv( - file_path, - mode="a" if os.path.exists(file_path) else "w", - header=not os.path.exists(file_path), - index=False, - ) - - -def echaExtract( - substance: str, - scrapingType: str, - outputType="df", - key_infos=False, - local_search=False, - local_only = False -): - """ - Funzione principale per scrapare dal sito ECHA. Mette insieme tante funzioni diverse di ricerca, estrazione e pulizia. - Registra il logging delle operazioni. - - Args: - substance (str): CAS o nome della sostanza. Vanno bene entrambi ma il CAS funziona meglio. 
- scrapingType (str): 'AcuteToxicity' (LD50) o 'RepeatedDose' (NOAEL) - outputType (str): 'pd.DataFrame' o 'json' (sconsigliato) - key_infos (bool): Di base True. Specifica se cercare la sezione "Description of Key Information" nei dossiers. - Certe sostanze hanno i dati inseriti a cazzo e mettono le informazioni lì in forma discorsiva al posto che altrove. - - Output: - un dataframe o un json, - f"Non esistono lead dossiers attivi o inattivi per {substance}" - """ - - # se local_search = True tento una ricerca in locale. Altrimenti la provo online. - if local_search and local_echa: - result = echaExtract_local(substance, scrapingType, key_infos) - - if not result.empty: - logging.info( - f"echa.echaProcess.echaExtract(): Found local data for {scrapingType}, {substance}. Returning it." - ) - return result - elif result.empty: - logging.info( - f"echa.echaProcess.echaExtract(): Have not found local data for {scrapingType}, {substance}. Continuining." - ) - if local_only: - logging.info(f'echa.echaProcess.echaExtract(): No data found in local-only search for {substance}, {scrapingType}') - return f'No data found in local-only search for {substance}, {scrapingType}' - - try: - # con search_dossier trovo le informazioni relative al dossiers cercando sul sito echa la sostanza fornita. - links = search_dossier(substance) - if not links: - logging.info( - f'echaProcess.echaExtract(). no active or unactive lead dossiers for: "{substance}". Ending extraction.' - ) - return f"Non esistono lead dossiers attivi o inattivi per {substance}" - # Se non esistono LEAD dossiers (quelli con i riassunti tossicologici) attivi o inattivi - # LEAD dossiers: riassumono le informazioni di un po' di tutti gli altri dossier, sono quelli completi dove c'erano le info necessarie - - # Se esistono, apro la pagina che mi interessa ('Acute Toxicity' o 'Repeated Dose') - - if not scrapingType in list(links.keys()): - logging.info( - f'echaProcess.echaExtract(). 
No page for "{scrapingType}", "{substance}"' - ) - return f'No data in "{scrapingType}", "{substance}". Page does not exist.' - - soup = openEchaPage(link=links[scrapingType]) - logging.info( - f"echaProcess.echaExtract(). soupped '{scrapingType}' echa page for '{substance}'" - ) - - # Piglio la sezione che mi serve - try: - sezione = soup.find( - "section", - class_="KeyValueForChemicalSafetyAssessment", - attrs={"data-cy": "das-block"}, - ) - except: - logging.error( - f'echaProcess.echaExtract(). could not extract the "section" for "{scrapingType}" for "{substance}"', - exc_info=True, - ) - - # Per ottenere il timestamp attuale - now = datetime.now() - - # UPDATE. Cerco le key infos: recupera quel testo di summary generale - key_infos_faund = False - if key_infos: - try: - key_infos = soup.find( - "section", - class_="KeyInformation", - attrs={"data-cy": "das-block"}, - ) - if key_infos: - key_infos = key_infos.find( - "div", - class_="das-field_value das-field_value_html", - ) - key_infos = key_infos.text - key_infos = key_infos if key_infos.strip() != "[Empty]" else None - if key_infos: - key_infos_faund = True - logging.info( - f"echaProcess.echaExtract(). Extracted key_infos from '{scrapingType}' echa page for '{substance}': {key_infos}" - ) - key_infos_df = pd.DataFrame(index=[0]) - key_infos_df["key_information"] = key_infos - key_infos_df = df_wrapper( - df=key_infos_df, - rmlName=links["rmlName"], - rmlCas=links["rmlCas"], - timestamp=now.strftime("%Y-%m-%d"), - dossierType=links["dossierType"], # attivo o inattivo?? da verificare - page=scrapingType, # repeated dose o acute toxicity - linkPage=links[scrapingType], # i link al dossier di repeated dose o acute toxicity - key_infos=True, - ) - else: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() ERROR. could not extract key_infos for "{scrapingType}", "{substance}"' - ) - else: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() ERROR. 
could not extract key_infos for "{scrapingType}", "{substance}"' - ) - except: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() ERROR. could not extract key_infos for "{scrapingType}", "{substance}"', - exc_info=True, - ) - - try: - if not sezione: # la sezione principale che viene scrapata - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() Empty section for the html > markdown conversion. No data for "{scrapingType}", "{substance}"' - ) - if not key_infos_faund: - # Se non ci sono dati ma ci sono le key informations ritorno quelle - return f'No data in "{scrapingType}", "{substance}"' - else: - return key_infos_df - - # Trasformo la sezione html in markdown - output = echaPage_to_md( - sezione, scrapingType=scrapingType, substance=substance - ) - logging.info( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() OK. created MD for "{scrapingType}", "{substance}"' - ) - - # Ci sono rari casi in cui proprio non esistono pagine per l'acute toxicity o la repeated dose. In quel caso output sarà vuoto e darà errore - # logging.info(output) - except: - logging.error( - f'echaProcess.echaExtract() > echaProcess.echaPage_to_md() ERROR. could not MD for "{scrapingType}", "{substance}"', - exc_info=True, - ) - - try: - # Trasformo il markdown nel primo json raw - jsonified = markdown_to_json_raw( - output, scrapingType=scrapingType, substance=substance - ) - logging.info( - f'echaProcess.echaExtract() > echaProcess.markdown_to_json_raw() OK. created initial json for "{scrapingType}", "{substance}"' - ) - except: - logging.error( - f'echaProcess.echaExtract() > echaProcess.markdown_to_json_raw() ERROR. 
could not create initial json for "{scrapingType}", "{substance}"', - exc_info=True, - ) - - json_data = json.loads(jsonified) - - try: - # Secondo step per il processing del json: pulisco i dizionari piu' innestati - cleaned_data = clean_json(json_data) - logging.info( - f'echaProcess.echaExtract() > echaProcess.clean_json() OK. cleaned the json for "{scrapingType}", "{substance}"' - ) - # Se cleaned_data è vuoto vuol dire che non ci sono dati - if not cleaned_data: - logging.error( - f'echaProcess.echaExtract() > echaProcess.clean_json() Empty cleaned_json. No data for "{scrapingType}", "{substance}"' - ) - if not key_infos_faund: - # Se non ci sono dati ma ci sono le key informations ritorno quelle - return f'No data in "{scrapingType}", "{substance}"' - else: - return key_infos_df - except: - logging.error( - f'echaProcess.echaExtract() > echaProcess.clean_json() ERROR. cleaning the json for "{scrapingType}", "{substance}"' - ) - - # Se si vuole come output il dataframe creo un dataframe e ci aggiungo un timestamp - try: - df = json_to_dataframe(cleaned_data, scrapingType) - df = df_wrapper( - df=df, - rmlName=links["rmlName"], - rmlCas=links["rmlCas"], - timestamp=now.strftime("%Y-%m-%d"), - dossierType=links["dossierType"], - page=scrapingType, - linkPage=links[scrapingType], - ) - - if outputType == "df": - logging.info( - f'echaProcess.echaExtract(). succesfully extracted "{scrapingType}", "{substance}". Returning df' - ) - - # Se l'utente vuole le key infos e le key_infos sono state trovate unisco i due df - return df if not key_infos_faund else pd.concat([key_infos_df, df]) - - elif outputType == "json": - if key_infos_faund: - df = pd.concat([key_infos_df, df]) - jayson = df.to_json(orient="records", force_ascii=False) - logging.info( - f'echaProcess.echaExtract(). succesfully extracted "{scrapingType}", "{substance}". 
Returning json' - ) - return jayson - except KeyError: - # Per gestire le pagine di merda che hanno solo "no information available" - - if key_infos_faund: - return key_infos_df - - json_output = list(cleaned_data[list(cleaned_data.keys())[0]].values()) - if json_output == ["no information available" for elem in json_output]: - logging.info( - f"echaProcess.echaExtract(). No data found for {scrapingType} for {substance}" - ) - return f'No data in "{scrapingType}", "{substance}"' - else: - logging.error( - f"echaProcess.json_to_dataframe(). Could not create dataframe" - ) - cleaned_data["error"] = ( - "Non sono riuscito a creare il dataframe, probabilmente non ci sono abbastanza informazioni. Ritorno il JSON" - ) - return cleaned_data - - except Exception: - logging.error( - f"echaProcess.echaExtract() ERROR. Something went wrong, not quite sure what.", - exc_info=True, - ) - - -def df_wrapper( - df, rmlName, rmlCas, timestamp, dossierType, page, linkPage, key_infos=False -): - # Un semplice metodo per aggiungere tutta la roba che ci serve al dataframe. - # Per non intasare echaExtract che già di suo è un figa di bordello - df.insert(0, "Substance", rmlName) - df.insert(1, "CAS", rmlCas) - df["Extraction_Timestamp"] = timestamp - df = df.replace("\n", "", regex=True) - if not key_infos: - df = df[df["Effect level"].isnull() == False] - - # Aggiungo il link del dossier e lo status - df["dossierType"] = dossierType - df["page"] = page - df["linkPage"] = linkPage - return df - -def echaExtract_specific( - CAS: str, - scrapingType="RepeatedDose", - doseDescriptor="NOAEL", - route="inhalation", - local_search=False, - local_only=False -): - """ - Dato un CAS cerca di trovare il dose descriptor (di base NOAEL) per la route specificata (di base 'inhalation'). - - Args: - CAS (str): il cas o in alternativa la sostanza - route (str): 'inhalation', 'oral', 'dermal'. 
Di base 'inhalation' - scrapingType (str): la pagina su cui cercarlo - doseDescriptor (str): il tipo di valore da ricercare (NOAEL, DNEL, LD50, LC50) - """ - - # Tento di estrarre - result = echaExtract( - substance=CAS, - scrapingType=scrapingType, - outputType="df", - local_search=local_search, - local_only=local_only - ) - - # Il risultato è un dataframe? - if type(result) == pd.DataFrame: - # Se sì, lo filtro per ciò che mi interessa - filtered_df = result[ - (result["Route"] == route) & (result["Dose descriptor"] == doseDescriptor) - ] - # Se non è vuoto lo ritorno - if not filtered_df.empty: - return filtered_df - else: - return f'Non ho trovato {doseDescriptor} in {scrapingType} con route "{route}" per {CAS}' - - elif type(result) == dict and result["error"]: - # Questo significa che gli è arrivato qualche json con un errore - return f'Non ho trovato {doseDescriptor} in {scrapingType} con route "{route}" per {CAS}' - - # Questo significa che ha ricevuto un "Non esistono" come risultato. Non esistono lead dossiers attivi o inattivi per la sostanza ricercata - elif result.startswith("Non esistono"): - return result - - -def echa_noael_ld50(CAS: str, route="inhalation", outputType="df", local_search=False, local_only=False): - """ - Dato un CAS cerca di trovare il NOAEL per la route specificata (di base 'inhalation'). - Se non esiste la pagina RepeatedDose con il NOAEL fa ritornare l'LD50 per quella route. - - Args: - CAS (str): il cas o in alternativa la sostanza - route (str): 'inhalation', 'oral', 'dermal'. Di base 'inhalation' - outputType (str) = 'df', 'json'. 
Il tipo di output - - """ - if route not in ["inhalation", "oral", "dermal"] and outputType not in [ - "df", - "json", - ]: - return "invalid input" - # Di base cerco di scrapare la pagina "Repeated Dose" - first_attempt = echaExtract_specific( - CAS=CAS, - scrapingType="RepeatedDose", - doseDescriptor="NOAEL", - route=route, - local_search=local_search, - local_only=local_only - ) - - if isinstance(first_attempt, pd.DataFrame): - return first_attempt - elif isinstance(first_attempt, str) and first_attempt.startswith("Non ho trovato"): - second_attempt = echaExtract_specific( - CAS=CAS, - scrapingType="AcuteToxicity", - doseDescriptor="LD50", - route=route, - local_search=True, - local_only=local_only - ) - if isinstance(second_attempt, pd.DataFrame): - return second_attempt - elif isinstance(second_attempt, str) and second_attempt.startswith( - "Non ho trovato" - ): - return second_attempt.replace("LD50", "NOAEL ed LD50") - elif first_attempt.startswith("Non esistono"): - return first_attempt - - -def echa_noael_ld50_multi( - casList: list, route="inhalation", messages=False, local_search=False, local_only=False -): - """ - Metodo abbastanza semplice. Data una lista di cas esegue echa_noael_ld50. Quindi cerca i NOAEL per la route desiderata o gli LD50 se non trova i NOAEL. - L'output è un df per le sostanze che trova e una lista di messaggi per quelle che non trova. - - Args: - casList (list): la lista di CAS - route (str): 'inhalation', 'oral', 'dermal'. Di base 'inhalation' - messages (boolean) = True o False. Con True fa ritornare una lista. Il primo elemento sarà il dataframe, il secondo la lista di messaggi per le sostanze non trovate. - Di base è False e fa ritornare solo il dataframe. 
- """ - messages_list = [] - df = pd.DataFrame() - for CAS in casList: - output = echa_noael_ld50( - CAS=CAS, route=route, outputType="df", local_search=local_search, local_only=local_only - ) - if isinstance(output, str): - messages_list.append(output) - elif isinstance(output, pd.DataFrame): - df = pd.concat([df, output], ignore_index=True) - df.dropna(axis=1, how="all", inplace=True) - if messages and df.empty: - messages_list.append( - f'Non sono riuscito a trovare nessun NOAEL o LD50 per i cas per la route "{route}"' - ) - return [None, messages_list] - elif messages and not df.empty: - return [df, messages_list] - elif not df.empty and not messages: - return df - elif df.empty and not messages: - return f'Non sono riuscito a trovare nessun NOAEL o LD50 per i cas per la route "{route}"' - - -def echaExtract_multi( - casList: list, - scrapingType="all", - local=False, - local_path=None, - log_path=None, - debug_print=False, - error=False, - error_path=None, - key_infos=False, - local_search=False, - local_only=False, - filter = None -): - """ - Data una lista di CAS cerca di estrarre tutte le pagine con le Repeated Dose, tutte le pagine con l'AcuteToxicity, o entrambe - - Args: - casList (list): la lista di CAS - scrapingType (str): 'RepeatedDose', 'AcuteToxicity', 'all' - local (boolean): Se impostato su True questo parametro salva sul disco in maniera progressiva, appendendo ogni result man mano che li trova. - è necessario per lo scraping su larga scala - log_path (str): il path per il log da fillare durante lo scraping di massa - debug_print (bool): per avere il printing durante lo scraping. per verificare l'avanzamento - error (bool): Per far ritornare la lista degli errori una volta scrapato - - Output: - pd.Dataframe - """ - cas_len = len(casList) - i = 0 - - df = pd.DataFrame() - if scrapingType == "all": - scrapingTypeList = ["RepeatedDose", "AcuteToxicity"] - else: - scrapingTypeList = [scrapingType] - - logging.info( - f"echa.echaExtract_multi(). 
Commencing mass extraction of {scrapingTypeList} for {casList}" - ) - - errors = [] - - for cas in casList: - for scrapingType in scrapingTypeList: - extraction = echaExtract( - substance=cas, - scrapingType=scrapingType, - outputType="df", - key_infos=key_infos, - local_search=local_search, - local_only=local_only - ) - if isinstance(extraction, pd.DataFrame) and not extraction.empty: - status = "successful_scrape" - logging.info( - f"echa.echaExtract_multi(). Succesfully scraped {scrapingType} for {cas}" - ) - - df = pd.concat([df, extraction], ignore_index=True) - if local and local_path: - df.to_csv(local_path, index=False) - - elif ( - (isinstance(extraction, pd.DataFrame) and extraction.empty) - or (extraction is None) - or (isinstance(extraction, str) and extraction.startswith("No data")) - ): - status = "no_data_found" - logging.info( - f"echa.echaExtract_multi(). Found no data for {scrapingType} for {cas}" - ) - elif isinstance(extraction, dict): - if extraction["error"]: - status = "df_creation_error" - errors.append(extraction) - logging.info( - f"echa.echaExtract_multi(). Df creation error for {scrapingType} for {cas}" - ) - elif isinstance(extraction, str) and extraction.startswith("Non esistono"): - status = "no_lead_dossiers" - logging.info( - f"echa.echaExtract_multi(). Found no lead dossiers for {cas}" - ) - else: - status = "unknown_error" - logging.error( - f"echa.echaExtract_multi(). 
Unknown error for {scrapingType} for {cas}" - ) - - if log_path: - fill_log(cas, status, log_path, scrapingType) - if debug_print: - print(f"{i}: {cas}, {scrapingType}") - i += 1 - - if error and errors and error_path: - with open(error_path, "w") as json_file: - json.dump(errors, json_file, indent=4) - - # Questa è la mossa che mi permette di eliminare 4 metodi - if filter: - df = filter_dataframe_by_dict(df, filter) - return df - - -def fill_log(cas: str, status: str, log_path: str, scrapingType: str): - """ - Funzione usata durante lo scraping di massa per fillare un log mentre estraggo le sostanze - """ - - df = pd.read_csv(log_path) - df.loc[df["casNo"] == cas, f"scraping_{scrapingType}"] = status - df.loc[df["casNo"] == cas, "timestamp"] = datetime.now().strftime("%Y-%m-%d") - - df.to_csv(log_path, index=False) - -def echaExtract_local(substance:str, scrapingType:str, key_infos=False): - if not key_infos: - query = f""" - SELECT * - FROM echa_full_scraping - WHERE CAS = '{substance}' AND page = '{scrapingType}' AND key_information IS NULL; - """ - elif key_infos: - query = f""" - SELECT * - FROM echa_full_scraping - WHERE CAS = '{substance}' AND page = '{scrapingType}'; - - """ - result = con.sql(query).df() - return result - -def filter_dataframe_by_dict(df, filter_dict): - """ - Filters a Pandas DataFrame based on a dictionary. - - Args: - df (pd.DataFrame): The input DataFrame. - filter_dict (dict): A dictionary where keys are column names and - values are lists of allowed values for that column. - - Returns: - pd.DataFrame: A new DataFrame containing only the rows that match - the filter criteria. 
- """ - - filter_condition = pd.Series(True, index=df.index) # Initialize with all True to start filtering - - for column_name, allowed_values in filter_dict.items(): - if column_name in df.columns: # Check if the column exists in the DataFrame - column_filter = df[column_name].isin(allowed_values) # Create a boolean Series for the current column - filter_condition = filter_condition & column_filter # Combine with existing condition using 'and' - else: - print(f"Warning: Column '{column_name}' not found in the DataFrame. Filter for this column will be ignored.") - - filtered_df = df[filter_condition] # Apply the combined filter condition - return filtered_df diff --git a/old/pdf_extraction.py b/old/pdf_extraction.py deleted file mode 100644 index b246423..0000000 --- a/old/pdf_extraction.py +++ /dev/null @@ -1,467 +0,0 @@ -import os -import base64 -import traceback -import logging # Import logging module -import datetime -import pandas as pd -# import time # Keep if you use page.wait_for_timeout -from playwright.sync_api import sync_playwright, TimeoutError # Catch specific errors -from src.func.find import search_dossier -import requests - -# --- Basic Logging Setup (Commented Out) --- -# # Configure logging - uncomment and customize level/handler as needed -# logging.basicConfig( -# level=logging.INFO, # Or DEBUG for more details -# format='%(asctime)s - %(levelname)s - %(message)s', -# # filename='pdf_generator.log', # Optional: Log to a file -# # filemode='a' -# ) -# --- End Logging Setup --- - - -# Assume svg_to_data_uri is defined elsewhere correctly -def svg_to_data_uri(svg_path): - try: - if not os.path.exists(svg_path): - # logging.error(f"SVG file not found: {svg_path}") # Example logging - raise FileNotFoundError(f"SVG file not found: {svg_path}") - with open(svg_path, 'rb') as f: - svg_content = f.read() - encoded_svg = base64.b64encode(svg_content).decode('utf-8') - return f"data:image/svg+xml;base64,{encoded_svg}" - except Exception as e: - 
print(f"Error converting SVG {svg_path}: {e}") - # logging.error(f"Error converting SVG {svg_path}: {e}", exc_info=True) # Example logging - return None - -# --- JavaScript Expressions --- - -# Define the cleanup logic as an immediately-invoked arrow function expression -# NOTE: .das-block_empty removal is currently disabled as per previous step -cleanup_js_expression = """ -() => { - console.log('Running cleanup JS (DISABLED .das-block_empty removal)...'); - let totalRemoved = 0; - - // Example 1: Remove sections explicitly marked as empty (Currently Disabled) - // const emptyBlocks = document.querySelectorAll('.das-block_empty'); - // emptyBlocks.forEach(el => { - // if (el && el.parentNode) { - // console.log(`Removing '.das-block_empty' block with ID: ${el.id || 'N/A'}`); - // el.remove(); - // totalRemoved++; - // } - // }); - - // Add other specific cleanup logic here if needed - - console.log(`Cleanup script removed ${totalRemoved} elements (DISABLED .das-block_empty removal).`); - return totalRemoved; // Return the count -} -""" -# --- End JavaScript Expressions --- - - -def generate_pdf_with_header_and_cleanup( - url, - pdf_path, - substance_name, - substance_link, - ec_number, - cas_number, - header_template_path=r"src\func\resources\injectableHeader.html", - echa_chem_logo_path=r"src\func\resources\echa_chem_logo.svg", - echa_logo_path=r"src\func\resources\ECHA_Logo.svg" -) -> bool: # Added return type hint - """ - Generates a PDF with a dynamic header and optionally removes empty sections. - Provides basic logging (commented out) and returns True/False for success/failure. - - Args: - url (str): The target URL OR local HTML file path. - pdf_path (str): The output PDF path. - substance_name (str): The name of the chemical substance. - substance_link (str): The URL the substance name should link to (in header). - ec_number (str): The EC number for the substance. - cas_number (str): The CAS number for the substance. 
- header_template_path (str): Path to the HTML header template file. - echa_chem_logo_path (str): Path to the echa_chem_logo.svg file. - echa_logo_path (str): Path to the ECHA_Logo.svg file. - - Returns: - bool: True if the PDF was generated successfully, False otherwise. - """ - final_header_html = None - # logging.info(f"Starting PDF generation for URL: {url} to path: {pdf_path}") # Example logging - - # --- 1. Prepare Header HTML --- - try: - # logging.debug(f"Reading header template from: {header_template_path}") # Example logging - print(f"Reading header template from: {header_template_path}") - if not os.path.exists(header_template_path): - raise FileNotFoundError(f"Header template file not found: {header_template_path}") - with open(header_template_path, 'r', encoding='utf-8') as f: - header_template_content = f.read() - if not header_template_content: - raise ValueError("Header template file is empty.") - - # logging.debug("Converting logos...") # Example logging - print("Converting logos...") - logo1_data_uri = svg_to_data_uri(echa_chem_logo_path) - logo2_data_uri = svg_to_data_uri(echa_logo_path) - if not logo1_data_uri or not logo2_data_uri: - raise ValueError("Failed to convert one or both logos to Data URIs.") - - # logging.debug("Replacing placeholders...") # Example logging - print("Replacing placeholders...") - final_header_html = header_template_content.replace("##ECHA_CHEM_LOGO_SRC##", logo1_data_uri) - final_header_html = final_header_html.replace("##ECHA_LOGO_SRC##", logo2_data_uri) - final_header_html = final_header_html.replace("##SUBSTANCE_NAME##", substance_name) - final_header_html = final_header_html.replace("##SUBSTANCE_LINK##", substance_link) - final_header_html = final_header_html.replace("##EC_NUMBER##", ec_number) - final_header_html = final_header_html.replace("##CAS_NUMBER##", cas_number) - - if "##" in final_header_html: - print("Warning: Not all placeholders seem replaced in the header HTML.") - # logging.warning("Not all 
placeholders seem replaced in the header HTML.") # Example logging - - except Exception as e: - print(f"Error during header setup phase: {e}") - traceback.print_exc() - # logging.error(f"Error during header setup phase: {e}", exc_info=True) # Example logging - return False # Return False on header setup failure - # --- End Header Prep --- - - # --- CSS Override Definition --- - # Using Revision 4 from previous step (simplified breaks, boundary focus) - selectors_to_fix = [ - '.das-field .das-field_value_html', - '.das-field .das-field_value_large', - '.das-field .das-value_remark-text' - ] - css_selector_string = ",\n".join(selectors_to_fix) - css_override = f""" - -""" - # --- End CSS Override Definition --- - - # --- Playwright Automation --- - try: - with sync_playwright() as p: - # logging.debug("Launching browser...") # Example logging - # browser = p.chromium.launch(headless=False, devtools=True) # For debugging - browser = p.chromium.launch() - page = browser.new_page() - # Capture console messages (Corrected: use msg.text) - page.on("console", lambda msg: print(f"Browser Console: {msg.text}")) - - try: - # logging.info(f"Navigating to page: {url}") # Example logging - print(f"Navigating to: {url}") - if os.path.exists(url) and not url.startswith('file://'): - page_url = f'file://{os.path.abspath(url)}' - # logging.info(f"Treating as local file: {page_url}") # Example logging - print(f"Treating as local file: {page_url}") - else: - page_url = url - - page.goto(page_url, wait_until='load', timeout=90000) - # logging.info("Page navigation complete.") # Example logging - - # logging.debug("Injecting header HTML...") # Example logging - print("Injecting header HTML...") - page.evaluate(f'(headerHtml) => {{ document.body.insertAdjacentHTML("afterbegin", headerHtml); }}', final_header_html) - - # logging.debug("Injecting CSS overrides...") # Example logging - print("Injecting CSS overrides...") - page.evaluate(f"""(css) => {{ - const existingStyle = 
document.getElementById('pdf-override-styles'); - if (existingStyle) existingStyle.remove(); - document.head.insertAdjacentHTML('beforeend', css); - }}""", css_override) - - # logging.debug("Running JavaScript cleanup function...") # Example logging - print("Running JavaScript cleanup function...") - elements_removed_count = page.evaluate(cleanup_js_expression) - # logging.info(f"Cleanup script finished (reported removing {elements_removed_count} elements).") # Example logging - print(f"Cleanup script finished (reported removing {elements_removed_count} elements).") - - - # --- Optional: Emulate Print Media --- - # print("Emulating print media...") - # page.emulate_media(media='print') - - # --- Generate PDF --- - # logging.info(f"Generating PDF: {pdf_path}") # Example logging - print(f"Generating PDF: {pdf_path}") - pdf_options = { - "path": pdf_path, "format": "A4", "print_background": True, - "margin": {'top': '20px', 'bottom': '20px', 'left': '20px', 'right': '20px'}, - "scale": 1.0 - } - page.pdf(**pdf_options) - # logging.info(f"PDF saved successfully to: {pdf_path}") # Example logging - print(f"PDF saved successfully to: {pdf_path}") - - # logging.debug("Closing browser.") # Example logging - print("Closing browser.") - browser.close() - return True # Indicate success - - except TimeoutError as e: - print(f"A Playwright TimeoutError occurred: {e}") - traceback.print_exc() - # logging.error(f"Playwright TimeoutError occurred: {e}", exc_info=True) # Example logging - browser.close() # Ensure browser is closed on error - return False # Indicate failure - except Exception as e: # Catch other potential errors during Playwright page operations - print(f"An unexpected error occurred during Playwright page operations: {e}") - traceback.print_exc() - # logging.error(f"Unexpected error during Playwright page operations: {e}", exc_info=True) # Example logging - # Optional: Save HTML state on error - try: - html_content = page.content() - error_html_path = 
pdf_path.replace('.pdf', '_error.html') - with open(error_html_path, 'w', encoding='utf-8') as f_err: - f_err.write(html_content) - # logging.info(f"Saved HTML state on error to: {error_html_path}") # Example logging - print(f"Saved HTML state on error to: {error_html_path}") - except Exception as save_e: - # logging.error(f"Could not save HTML state on error: {save_e}", exc_info=True) # Example logging - print(f"Could not save HTML state on error: {save_e}") - browser.close() # Ensure browser is closed on error - return False # Indicate failure - # Note: The finally block for the 'with sync_playwright()' context - # is handled automatically by the 'with' statement. - - except Exception as e: - # Catch errors during Playwright startup (less common) - print(f"An error occurred during Playwright setup/teardown: {e}") - traceback.print_exc() - # logging.error(f"Error during Playwright setup/teardown: {e}", exc_info=True) # Example logging - return False # Indicate failure - - -# --- Example Usage --- -# result = generate_pdf_with_header_and_cleanup( -# url='path/to/your/input.html', -# pdf_path='output.pdf', -# substance_name='Glycerol Example', -# substance_link='http://example.com/glycerol', -# ec_number='200-289-5', -# cas_number='56-81-5', -# ) -# -# if result: -# print("PDF Generation Succeeded.") -# # logging.info("Main script: PDF Generation Succeeded.") # Example logging -# else: -# print("PDF Generation Failed.") -# # logging.error("Main script: PDF Generation Failed.") # Example logging - - -def search_generate_pdfs( - cas_number_to_search: str, - page_types_to_extract: list[str], - base_output_folder: str = "data/library" -) -> bool: - """ - Searches for a substance by CAS, saves raw HTML and generates PDFs for - specified page types. Uses '_js' link variant for the PDF header link if available. - - Args: - cas_number_to_search (str): CAS number to search for. - page_types_to_extract (list[str]): List of page type names (e.g., 'RepeatedDose'). 
- Expects '{page_type}' and '{page_type}_js' keys - in the search result. - base_output_folder (str): Root directory for saving HTML/PDFs. - - Returns: - bool: True if substance found and >=1 requested PDF generated, False otherwise. - """ - # logging.info(f"Starting process for CAS: {cas_number_to_search}") - print(f"\n===== Processing request for CAS: {cas_number_to_search} =====") - - # --- 1. Search for Dossier Information --- - try: - # logging.debug(f"Calling search_dossier for CAS: {cas_number_to_search}") - search_result = search_dossier(substance=cas_number_to_search, input_type='rmlCas') - except Exception as e: - print(f"Error during dossier search for CAS '{cas_number_to_search}': {e}") - traceback.print_exc() - # logging.error(f"Exception during search_dossier for CAS '{cas_number_to_search}': {e}", exc_info=True) - return False - - if not search_result: - print(f"Substance not found or search failed for CAS: {cas_number_to_search}") - # logging.warning(f"Substance not found or search failed for CAS: {cas_number_to_search}") - return False - - # logging.info(f"Substance found for CAS: {cas_number_to_search}") - print(f"Substance found: {search_result.get('rmlName', 'N/A')}") - - # --- 2. 
Extract Details and Filter Pages --- - try: - # Extract required info - rml_id = search_result.get('rmlId') - rml_name = search_result.get('rmlName') - rml_cas = search_result.get('rmlCas') - rml_ec = search_result.get('rmlEc') - asset_ext_id = search_result.get('assetExternalId') - - # Basic validation - if not all([rml_id, rml_name, rml_cas, rml_ec, asset_ext_id]): - missing_keys = [k for k, v in {'rmlId': rml_id, 'rmlName': rml_name, 'rmlCas': rml_cas, 'rmlEc': rml_ec, 'assetExternalId': asset_ext_id}.items() if not v] - message = f"Search result for {cas_number_to_search} is missing required keys: {missing_keys}" - print(f"Error: {message}") - # logging.error(message) - return False - - # --- Filtering Logic - Collect pairs of URLs --- - pages_to_process_list = [] # Store tuples: (page_name, raw_url, js_url) - # logging.debug(f"Filtering pages. Requested: {page_types_to_extract}.") - - for page_type in page_types_to_extract: - raw_url_key = page_type - js_url_key = f"{page_type}_js" - - raw_url = search_result.get(raw_url_key) - js_url = search_result.get(js_url_key) # Get the JS URL - - # Check if both URLs are valid strings - if raw_url and isinstance(raw_url, str) and raw_url.strip(): - if js_url and isinstance(js_url, str) and js_url.strip(): - pages_to_process_list.append((page_type, raw_url, js_url)) - # logging.debug(f"Found valid pair for '{page_type}': Raw='{raw_url}', JS='{js_url}'") - else: - # Found raw URL but not a valid JS URL - skip this page type for PDF? - # Or use raw_url for header too? Let's skip if JS URL is missing/invalid. - print(f"Found raw URL for '{page_type}' but missing/invalid JS URL ('{js_url}'). Skipping PDF generation for this type.") - # logging.warning(f"Missing/invalid JS URL for page type '{page_type}' for {rml_cas}. 
Raw URL: '{raw_url}'.") - else: - # Raw URL missing or invalid - if page_type in search_result: # Check if key existed at all - print(f"Found page type key '{page_type}' for {rml_cas}, but its value is not a valid URL ('{raw_url}'). Skipping.") - # logging.warning(f"Invalid raw URL value for page type '{page_type}' for {rml_cas}: '{raw_url}'.") - else: - print(f"Requested page type key '{page_type}' not found in search results for {rml_cas}.") - # logging.warning(f"Requested page type key '{page_type}' not found for {rml_cas}.") - # --- End Filtering Logic --- - - if not pages_to_process_list: - print(f"After filtering, no requested page types ({page_types_to_extract}) resulted in a valid pair of Raw and JS URLs for substance {rml_cas}.") - # logging.warning(f"No pages with valid URL pairs to process for substance {rml_cas}.") - return False # Nothing to generate - - except Exception as e: - print(f"Error processing search result for '{cas_number_to_search}': {e}") - traceback.print_exc() - # logging.error(f"Error processing search result for '{cas_number_to_search}': {e}", exc_info=True) - return False - - # --- 3. Prepare Folders --- - safe_cas = rml_cas.replace('/', '_').replace('\\', '_') - substance_folder_name = f"{safe_cas}_{rml_ec}_{rml_id}" - substance_folder_path = os.path.join(base_output_folder, substance_folder_name) - - try: - os.makedirs(substance_folder_path, exist_ok=True) - # logging.info(f"Ensured output directory exists: {substance_folder_path}") - print(f"Ensured output directory exists: {substance_folder_path}") - except OSError as e: - print(f"Error creating directory {substance_folder_path}: {e}") - # logging.error(f"Failed to create directory {substance_folder_path}: {e}", exc_info=True) - return False - - - # --- 4. 
Process Each Page (Save HTML, Generate PDF) --- - successful_pages = [] # Track successful PDF generations - overall_success = False # Track if any PDF was generated - - for page_name, raw_html_url, js_header_link in pages_to_process_list: - print(f"\nProcessing page: {page_name}") - base_filename = f"{safe_cas}_{page_name}" - html_filename = f"{base_filename}.html" - pdf_filename = f"{base_filename}.pdf" - html_full_path = os.path.join(substance_folder_path, html_filename) - pdf_full_path = os.path.join(substance_folder_path, pdf_filename) - - # --- Save Raw HTML --- - html_saved = False - try: - # logging.debug(f"Fetching raw HTML for {page_name} from {raw_html_url}") - print(f"Fetching raw HTML from: {raw_html_url}") - # Add headers to mimic a browser slightly if needed - headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'} - response = requests.get(raw_html_url, timeout=30, headers=headers) # 30s timeout - response.raise_for_status() # Raise an exception for bad status codes (4xx or 5xx) - - # Decide encoding - response.text tries to guess, or use apparent_encoding - # Or assume utf-8 if unsure, which is common. 
- html_content = response.content.decode('utf-8', errors='replace') - - with open(html_full_path, 'w', encoding='utf-8') as f: - f.write(html_content) - html_saved = True - # logging.info(f"Successfully saved raw HTML for {page_name} to {html_full_path}") - print(f"Successfully saved raw HTML to: {html_full_path}") - except requests.exceptions.RequestException as req_e: - print(f"Error fetching raw HTML for {page_name} from {raw_html_url}: {req_e}") - # logging.error(f"Error fetching raw HTML for {page_name}: {req_e}", exc_info=True) - except IOError as io_e: - print(f"Error saving raw HTML for {page_name} to {html_full_path}: {io_e}") - # logging.error(f"Error saving raw HTML for {page_name}: {io_e}", exc_info=True) - except Exception as e: # Catch other potential errors like decoding - print(f"Unexpected error saving HTML for {page_name}: {e}") - # logging.error(f"Unexpected error saving HTML for {page_name}: {e}", exc_info=True) - - # --- Generate PDF (using raw URL for content, JS URL for header link) --- - # logging.info(f"Generating PDF for {page_name} from {raw_html_url}") - print(f"Generating PDF using content from: {raw_html_url}") - pdf_success = generate_pdf_with_header_and_cleanup( - url=raw_html_url, # Use raw URL for Playwright navigation/content - pdf_path=pdf_full_path, - substance_name=rml_name, - substance_link=js_header_link, # Use JS URL for the link in the header - ec_number=rml_ec, - cas_number=rml_cas - ) - - if pdf_success: - successful_pages.append(page_name) # Log success based on PDF generation - overall_success = True - # logging.info(f"Successfully generated PDF for {page_name} at {pdf_full_path}") - print(f"Successfully generated PDF for {page_name}") - else: - # logging.error(f"Failed to generate PDF for {page_name} from {raw_html_url}") - print(f"Failed to generate PDF for {page_name}") - # Decide if failure to save HTML should affect overall success or logging... - # Currently, success is tied only to PDF generation. 
- - print(f"===== Finished request for CAS: {cas_number_to_search} =====") - print(f"Successfully generated {len(successful_pages)} PDFs: {successful_pages}") - return overall_success # Return success based on PDF generation \ No newline at end of file diff --git a/old/resources/ECHA_Logo.svg b/old/resources/ECHA_Logo.svg deleted file mode 100644 index 8d1697c..0000000 --- a/old/resources/ECHA_Logo.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/old/resources/echa_chem_logo.svg b/old/resources/echa_chem_logo.svg deleted file mode 100644 index 3113df4..0000000 --- a/old/resources/echa_chem_logo.svg +++ /dev/null @@ -1,141 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/old/resources/injectableHeader.html b/old/resources/injectableHeader.html deleted file mode 100644 index 8343047..0000000 --- a/old/resources/injectableHeader.html +++ /dev/null @@ -1,184 +0,0 @@ - - - -
- -
-
- - -

##SUBSTANCE_NAME##

-
-
-
- - ##EC_NUMBER## -
-
- - ##CAS_NUMBER## -
-
- -
-
-
- \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 1e432aa..6f893f4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ dependencies = [ "markdown-to-json>=2.1.2", "markdownify>=1.2.0", "playwright>=1.55.0", + "psycopg2>=2.9.11", "pubchemprops>=0.1.1", "pubchempy>=1.0.5", "pydantic>=2.11.10", diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index 814c796..0000000 --- a/pytest.ini +++ /dev/null @@ -1,28 +0,0 @@ -[pytest] -# Pytest configuration for PIF Compiler - -# Test discovery -testpaths = tests -python_files = test_*.py -python_classes = Test* -python_functions = test_* - -# Output options -addopts = - -v - --strict-markers - --tb=short - --disable-warnings - -# Markers for different test types -markers = - unit: Unit tests (fast, no external dependencies) - integration: Integration tests (may hit real APIs) - slow: Slow tests (skip by default) - database: Tests requiring MongoDB - -# Coverage options (if pytest-cov is installed) -# addopts = --cov=src/pif_compiler --cov-report=html --cov-report=term - -# Ignore patterns -norecursedirs = .git .venv __pycache__ *.egg-info dist build diff --git a/src/pif_compiler/classes/models.py b/src/pif_compiler/classes/models.py index 09ea081..491f96d 100644 --- a/src/pif_compiler/classes/models.py +++ b/src/pif_compiler/classes/models.py @@ -1,73 +1,423 @@ -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Any -from datetime import datetime -from pydantic import BaseModel, StringConstraints, Field -from typing_extensions import Annotated -from pif_compiler.classes.types_enum import CosmeticType, PhysicalForm, PlaceApplication, NormalUser, RoutesExposure, NanoRoutes +from pydantic import BaseModel, Field, field_validator, ConfigDict, model_validator, computed_field +import re +from typing import List, Optional +from datetime import datetime as dt +from pif_compiler.services.srv_echa import extract_levels, at_extractor, rdt_extractor, 
orchestrator +from pif_compiler.functions.db_utils import postgres_connect +from pif_compiler.services.srv_pubchem import pubchem_dap +from pif_compiler.services.srv_cosing import cosing_entry + + +class DapInfo(BaseModel): + cas: str + + molecular_weight: Optional[float] = Field(default=None, description="In Daltons (Da)") + high_ionization: Optional[float] = Field(default=None, description="High degree of ionization") + log_pow: Optional[float] = Field(default=None, description="Partition coefficient") + tpsa: Optional[float] = Field(default=None, description="Topological polar surface area") + melting_point: Optional[float] = Field(default=None, description="In Celsius (°C)") + + # --- Il valore DAP Calcolato --- + # Lo impostiamo di default a 0.5 (50%), verrà sovrascritto dal validator + dap_value: float = 0.5 + + @model_validator(mode='after') + def compute_dap(self): + # Lista delle condizioni (True se la condizione riduce l'assorbimento) + conditions = [] + + # 1. MW > 500 Da + if self.molecular_weight is not None: + conditions.append(self.molecular_weight > 500) + + # 2. High Ionization (Se è True, riduce l'assorbimento) + if self.high_ionization is not None: + conditions.append(self.high_ionization is True) + + # 3. Log Pow <= -1 OR >= 4 + if self.log_pow is not None: + conditions.append(self.log_pow <= -1 or self.log_pow >= 4) + + # 4. TPSA > 120 Å2 + if self.tpsa is not None: + conditions.append(self.tpsa > 120) + + # 5. Melting Point > 200°C + if self.melting_point is not None: + conditions.append(self.melting_point > 200) + + # LOGICA FINALE: + # Se c'è almeno una condizione "sicura" (True), il DAP è 0.1 + if any(conditions): + self.dap_value = 0.1 + else: + self.dap_value = 0.5 + + return self + + @classmethod + def dap_builder(cls, dap_data: dict): + """ + Costruisce un oggetto DapInfo a partire dai dati grezzi. 
+ """ + desiderated_keys = ['CAS', 'MolecularWeight', 'XLogP', 'TPSA', 'Melting Point', 'Dissociation Constants'] + actual_keys = [key for key in dap_data.keys() if key in desiderated_keys] + + dict = {} + + for key in actual_keys: + if key == 'CAS': + dict['cas'] = dap_data[key] + if key == 'MolecularWeight': + mw = float(dap_data[key]) + dict['molecular_weight'] = mw + if key == 'XLogP': + log_pow = float(dap_data[key]) + dict['log_pow'] = log_pow + if key == 'TPSA': + tpsa = float(dap_data[key]) + dict['tpsa'] = tpsa + if key == 'Melting Point': + try: + for item in dap_data[key]: + if '°C' in item['Value']: + mp = dap_data[key]['Value'] + mp_value = re.findall(r"[-+]?\d*\.\d+|\d+", mp) + if mp_value: + dict['melting_point'] = float(mp_value[0]) + except: + continue + if key == 'Dissociation Constants': + try: + for item in dap_data[key]: + if 'pKa' in item['Value']: + pk = dap_data[key]['Value'] + pk_value = re.findall(r"[-+]?\d*\.\d+|\d+", pk) + if pk_value: + dict['high_ionization'] = float(mp_value[0]) + except: + continue + + return cls(**dict) + +class CosingInfo(BaseModel): + cas : List[str] = Field(default_factory=list) + common_names : List[str] = Field(default_factory=list) + inci : List[str] = Field(default_factory=list) + annex : List[str] = Field(default_factory=list) + functionName : List[str] = Field(default_factory=list) + otherRestrictions : List[str] = Field(default_factory=list) + cosmeticRestriction : Optional[str] + + @classmethod + def cosing_builder(cls, cosing_data : dict): + cosing_keys = ['nameOfCommonIngredientsGlossary', 'casNo', 'functionName', 'annexNo', 'refNo', 'otherRestrictions', 'cosmeticRestriction', 'inciName'] + keys = [k for k in cosing_data.keys() if k in cosing_keys] + + cosing_dict = {} + + for k in keys: + if k == 'nameOfCommonIngredientsGlossary': + names = [] + for name in cosing_data[k]: + names.append(name) + cosing_dict['common_names'] = names + if k == 'inciName': + inci = [] + for inc in cosing_data[k]: + 
inci.append(inc) + cosing_dict['inci'] = inci + if k == 'casNo': + cas_list = [] + for casNo in cosing_data[k]: + cas_list.append(casNo) + cosing_dict['cas'] = cas_list + if k == 'functionName': + functions = [] + for func in cosing_data[k]: + functions.append(func) + cosing_dict['functionName'] = functions + if k == 'annexNo': + annexes = [] + i = 0 + for ann in cosing_data[k]: + restriction = ann + ' / ' + cosing_data['refNo'][i] + annexes.append(restriction) + i = i+1 + cosing_dict['annex'] = annexes + if k == 'otherRestrictions': + other_restrictions = [] + for ores in cosing_data[k]: + other_restrictions.append(ores) + cosing_dict['otherRestrictions'] = other_restrictions + if k == 'cosmeticRestriction': + cosing_dict['cosmeticRestriction'] = cosing_data[k] + + return cls(**cosing_dict) + + @classmethod + def cycle_identified(cls, cosing_data : dict): + cosing_entries = [] + if 'identifiedIngredient' in cosing_data.keys(): + identified_cosing = cls.cosing_builder(cosing_data['identifiedIngredient']) + cosing_entries.append(identified_cosing) + main = cls.cosing_builder(cosing_data) + cosing_entries.append(main) + + return cosing_entries + +class ToxIndicator(BaseModel): + indicator : str + value : int + unit : str + route : str + toxicity_type : Optional[str] = None + ref : Optional[str] = None + + @property + def priority_rank(self): + """Returns the numerical priority based on the toxicological indicator.""" + mapping = { + 'LD50': 1, + 'DL50': 1, + 'LOAEL': 3, + 'NOAEL': 4 + } + return mapping.get(self.indicator, -1) + + @property + def factor(self): + """Returns the factor based on the toxicity type.""" + if self.priority_rank == 1: + return 10 + elif self.priority_rank == 3: + return 3 + return 1 + +class Toxicity(BaseModel): + cas: str + indicators: list[ToxIndicator] + best_case: Optional[ToxIndicator] = None + factor: Optional[int] = None + + @model_validator(mode='after') + def set_best_case(self) -> 'Toxicity': + if self.indicators: + self.best_case 
= max(self.indicators, key=lambda x: x.priority_rank) + self.factor = self.best_case.factor + return self + + @classmethod + def from_result(cls, cas: str, result): + toxicity_types = ['repeated_dose_toxicity', 'acute_toxicity'] + indicators_list = [] + + for tt in toxicity_types: + if tt not in result: + continue + + try: + extractor = at_extractor if tt == 'acute_toxicity' else rdt_extractor + fetch = extract_levels(result[tt], extractor=extractor) + + link = result.get(f"{tt}_link", "") + + for key, lvl in fetch.items(): + lvl['ref'] = link + elem = ToxIndicator(**lvl) + indicators_list.append(elem) + + except Exception as e: + print(f"Errore durante l'estrazione di {tt}: {e}") + continue + + return cls( + cas=cas, + indicators=indicators_list + ) class Ingredient(BaseModel): - - inci_name : str = Annotated[ - str, - StringConstraints( - min_length=3, - max_length=50, - strip_whitespace=True, - to_upper=True) - ] + cas: str + inci: Optional[List[str]] = None + dap_info: Optional[DapInfo] = None + cosing_info: Optional[List[CosingInfo]] = None + toxicity: Optional[Toxicity] = None + creation_date: Optional[str] = None - cas : str = Annotated[str, StringConstraints( - min_length=5, - max_length=13, - strip_whitespace=True - )] - - quantity : float = Annotated[float, Field(gt=0.001, lt=100.0, allow_inf_nan = False)] - - # pubchem data x dap - mol_weight : Optional[int] - degree_ioniz : Optional[bool] - log_pow : Optional[int] - melting_pnt : Optional[int] - - # toxicity values - sed : Optional[float] - dap : float = 0.5 - sedd : Optional[float] - noael : Optional[int] - mos : Optional[int] - - # riferimenti - ref : Optional[str] - restriction: Optional[str] - -class ExpositionInfo(BaseModel): - type: CosmeticType - target_population: NormalUser - consumer_weight: str = "60 kg" - place_application: PlaceApplication - routes_exposure: RoutesExposure - nano_routes: NanoRoutes - surface_area: int - frequency: int + @classmethod + def ingredient_builder( + cls, + cas: 
str, + inci: Optional[List[str]] = None, + dap_data: Optional[dict] = None, + cosing_data: Optional[dict] = None, + toxicity_data: Optional[dict] = None): + + dap_info = DapInfo.dap_builder(dap_data) if dap_data else None + cosing_info = CosingInfo.cycle_identified(cosing_data) if cosing_data else None + toxicity = Toxicity.from_result(cas, toxicity_data) if toxicity_data else None + + return cls( + cas=cas, + inci=inci, + dap_info=dap_info, + cosing_info=cosing_info, + toxicity=toxicity + ) - # to be approximated by LLM - estimated_daily_amount_applied: float - relative_daily_amount_applied: float - retention_factor: float - calculated_daily_exposure: float - calculated_relative_daily_exposure: float + @model_validator(mode='after') + def set_creation_date(self) -> 'Ingredient': + self.creation_date = dt.now().isoformat() + return self + + def update_ingredient(self, attr : str, data : dict): + setattr(self, attr, data) + + def to_mongo_dict(self): + mongo_dict = self.model_dump() + return mongo_dict + + def get_stats(self): + stats = { + "has_dap_info": self.dap_info is not None, + "has_cosing_info": self.cosing_info is not None, + "has_toxicity_info": self.toxicity is not None, + "num_tox_indicators": len(self.toxicity.indicators) if self.toxicity else 0, + "has_best_tox_indicator": self.toxicity.best_case is not None if self.toxicity else False, + "has_restrictions_in_cosing": any(self.cosing_info[0].annex) if self.cosing_info else False, + "has_noael_indicator": any(ind.indicator == 'NOAEL' for ind in self.toxicity.indicators) if self.toxicity else False, + "has_ld50_indicator": any(ind.indicator == 'LD50' for ind in self.toxicity.indicators) if self.toxicity else False, + "has_loael_indicator": any(ind.indicator == 'LOAEL' for ind in self.toxicity.indicators) if self.toxicity else False + } + return stats + + def is_old(self, threshold_days: int = 365) -> bool: + if not self.creation_date: + return True + + creation_dt = dt.fromisoformat(self.creation_date) + 
current_dt = dt.now() + delta = current_dt - creation_dt + + return delta.days > threshold_days + + def add_inci_name(self, inci_name: str): + if self.inci is None: + self.inci = [] + if inci_name not in self.inci: + self.inci.append(inci_name) + + def return_best_toxicity(self) -> Optional[ToxIndicator]: + if self.toxicity and self.toxicity.best_case: + return self.toxicity.best_case + return None + + def return_cosing_restrictions(self) -> List[str]: + restrictions = [] + if self.cosing_info: + for cosing in self.cosing_info: + restrictions.extend(cosing.annex) + return restrictions + +class RetentionFactors: + LEAVE_ON = 1.0 + RINSE_OFF = 0.01 + DENTIFRICE = 0.05 + MOUTHWASH = 0.10 + DYE = 0.10 -class SedTable(BaseModel): - surface : int - total_exposition : int - frequency : int - retention : int - consumer_weight : int - total_sed : int +class Esposition(BaseModel): + preset_name : str + tipo_prodotto: str + popolazione_target: str = "Adulti" + peso_target_kg: float = 60.0 + + luogo_applicazione: str + esp_normali: List[str] + esp_secondarie: List[str] + esp_nano: List[str] + + sup_esposta: int = Field(ge=1, le=17500, description="Area di applicazione in cm2") + freq_applicazione: int = Field(default=1, description="Numero di applicazioni al giorno") + qta_giornaliera: float = Field(..., description="Quantità di prodotto applicata (g/die)") + ritenzione: float = Field(default=1.0, ge=0, le=1.0, description="Fattore di ritenzione") -class ProdCompany(BaseModel): - prod_company_name : str - prod_vat: int - prod_address : str \ No newline at end of file + note: Optional[str] = None + + @field_validator('esp_normali', 'esp_secondarie', 'esp_nano', mode='before') + @classmethod + def parse_postgres_array(cls, v): + # Se Postgres restituisce una stringa tipo '{a,b}' la trasformiamo in ['a','b'] + if isinstance(v, str): + cleaned = v.strip('{}[]') + return [item.strip() for item in cleaned.split(',')] if cleaned else [] + return v + + @computed_field + @property + 
def esposizione_calcolata(self) -> float: + return self.qta_giornaliera * self.ritenzione + + @computed_field + @property + def esposizione_relativa(self) -> float: + return (self.esposizione_calcolata * 1000) / self.peso_target_kg + + def save_to_postgres(self): + data = self.model_dump(mode='json') + query = """INSERT INTO tipi_prodotti ( + preset_name, tipo_prodotto, luogo_applicazione, + esp_normali, esp_secondarie, esp_nano, + sup_esposta, freq_applicazione, qta_giornaliera, ritenzione + ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING id_preset;""" + + conn = postgres_connect() + try: + with conn.cursor() as cur: + cur.execute(query, ( + data.get("preset_name"), data.get("tipo_prodotto"), + data.get("luogo_applicazione"), data.get("esp_normali"), + data.get("esp_secondarie"), data.get("esp_nano"), + data.get("sup_esposta"), data.get("freq_applicazione"), + data.get("qta_giornaliera"), data.get("ritenzione") + )) + result = cur.fetchone() + conn.commit() + return result[0] if result else None + except Exception as e: + print(f"Errore salvataggio: {e}") + conn.rollback() + return False + finally: + conn.close() + + @classmethod + def get_presets(cls): + conn = postgres_connect() + try: + with conn.cursor() as cur: + cur.execute("SELECT preset_name, tipo_prodotto, luogo_applicazione, esp_normali, esp_secondarie, esp_nano, sup_esposta, freq_applicazione, qta_giornaliera, ritenzione FROM tipi_prodotti;") + results = cur.fetchall() + + lista_oggetti = [] + for r in results: + obj = cls( + preset_name=r[0], + tipo_prodotto=r[1], + luogo_applicazione=r[2], + esp_normali=r[3], + esp_secondarie=r[4], + esp_nano=r[5], + sup_esposta=r[6], + freq_applicazione=r[7], + qta_giornaliera=r[8], + ritenzione=r[9] + ) + lista_oggetti.append(obj) + return lista_oggetti + except Exception as e: + print(f"Errore: {e}") + return [] + finally: + conn.close() \ No newline at end of file diff --git a/src/pif_compiler/classes/pif_class.py 
b/src/pif_compiler/classes/pif_class.py deleted file mode 100644 index 0dd9d61..0000000 --- a/src/pif_compiler/classes/pif_class.py +++ /dev/null @@ -1,36 +0,0 @@ -from typing import List, Optional -from datetime import datetime -from pydantic import BaseModel, Field - -from pif_compiler.classes.base_classes import ExpositionInfo, SedTable, ProdCompany, Ingredient -from pif_compiler.classes.types_enum import CosmeticType, PhysicalForm, NormalUser - -class PIF(BaseModel): - # INFORMAZIONI GENERALI DEL PRODOTTO - - # Data di esecuzione pif = datetime.now() - created_at: datetime = Field( - default_factory=lambda: datetime.strptime( - datetime.now().strftime('%Y-%m-%d'), - '%Y-%m-%d' - ) - ) - - # Informazioni del prodotto - company: str - product_name: str - type: CosmeticType - physical_form: PhysicalForm - CNCP: int - production_company: ProdCompany - - # Ingredienti - ingredients: List[Ingredient] # str = quantità decimale % - normal_consumer: Optional[NormalUser] - exposition: Optional[ExpositionInfo] - - # Informazioni di sicurezza - sed_table: Optional[SedTable] = None - undesired_effets: Optional[str] = None - description: Optional[str] = None - warnings: Optional[List[str]] = None \ No newline at end of file diff --git a/src/pif_compiler/classes/types_enum.py b/src/pif_compiler/classes/types_enum.py deleted file mode 100644 index ec95918..0000000 --- a/src/pif_compiler/classes/types_enum.py +++ /dev/null @@ -1,145 +0,0 @@ -from enum import Enum - -class TranslatedEnum(str, Enum): - def get_translation(self, lang: str) -> str: - translations = self.value.split("|") - return translations[0] if lang == "en" else translations[1] - -class PhysicalForm(TranslatedEnum): - SIERO = "Serum (liquid)|Siero (liquido)" - LOZIONE = "Lotion (liquid)|Lozione (liquido)" - CREMA = "Cream (liquid)|Crema (liquido)" - OLIO = "Oil (liquid)|Olio (liquido)" - GEL = "Gel (liquid)|Gel (liquido)" - SCHIUMA = "Foam (liquid)|Schiuma (liquido)" - SOLUZIONE = "Solution (liquid)|Soluzione 
(liquido)" - EMULSIONE = "Emulsion (liquid)|Emulsione (liquido)" - SOSPENSIONE = "Suspension (liquid)|Sospensione (liquido)" - BALSAMO = "Balm (semi-solid)|Balsamo (semi-solido)" - PASTA = "Paste (semi-solid)|Pasta (semi-solido)" - UNGENTO = "Ointment (semi-solid)|Unguento (semi-solido)" - POLVERE_COMPATTA = "Pressed Powder (solid)|Polvere compatta (solido)" - POLVERE_LIBERA = "Loose Powder (solid)|Polvere libera (solido)" - STICK = "Stick (solid)|Stick (solido)" - BARRETTA = "Bar (solid)|Barretta (solido)" - PERLE = "Beads/Pearls (solid)|Perle (solido)" - SPRAY = "Spray/Mist (aerosol)|Spray/Nebulizzatore (aerosol)" - AEROSOL = "Aerosol (aerosol)|Aerosol (aerosol)" - SPRAY_IN_POLVERE = "Powder Spray (aerosol)|Spray in polvere (aerosol)" - CUSCINETTO = "Cushion (hybrid)|Cuscinetto (ibrido)" - GELATINA = "Jelly (hybrid)|Gelatina (ibrido)" - PRODOTTO_BIFASICO = "Bi-Phase Product (hybrid)|Prodotto bifasico (ibrido)" - MICROINCAPSULATO = "Encapsulated Actives (hybrid)|Attivi microincapsulati (ibrido)" - -class CosmeticType(TranslatedEnum): - LIQUID_FOUNDATION = "Liquid foundation|Fondotinta liquido" - POWDER_FOUNDATION = "Powder foundation|Fondotinta in polvere" - BB_CREAM = "BB cream|BB cream" - CC_CREAM = "CC cream|CC cream" - CONCEALER = "Concealer|Correttore" - LOOSE_POWDER = "Loose powder|Cipria in polvere" - PRESSED_POWDER = "Pressed powder|Cipria compatta" - POWDER_BLUSH = "Powder blush|Blush in polvere" - CREAM_BLUSH = "Cream blush|Blush in crema" - LIQUID_BLUSH = "Liquid blush|Blush liquido" - BRONZER = "Bronzer|Bronzer" - HIGHLIGHTER = "Highlighter|Illuminante" - FACE_PRIMER = "Face primer|Primer viso" - SETTING_SPRAY = "Setting spray|Spray fissante" - COLOR_CORRECTOR = "Color corrector|Correttore colorato" - CONTOUR_POWDER = "Contour powder|Contouring in polvere" - CONTOUR_CREAM = "Contour cream|Contouring in crema" - TINTED_MOISTURIZER = "Tinted moisturizer|Crema colorata" - POWDER_EYESHADOW = "Powder eyeshadow|Ombretto in polvere" - CREAM_EYESHADOW = "Cream 
eyeshadow|Ombretto in crema" - LIQUID_EYESHADOW = "Liquid eyeshadow|Ombretto liquido" - PENCIL_EYELINER = "Pencil eyeliner|Matita occhi" - LIQUID_EYELINER = "Liquid eyeliner|Eyeliner liquido" - GEL_EYELINER = "Gel eyeliner|Eyeliner in gel" - KOHL_LINER = "Kohl liner|Matita kohl" - MASCARA = "Mascara|Mascara" - WATERPROOF_MASCARA = "Waterproof mascara|Mascara waterproof" - BROW_PENCIL = "Eyebrow pencil|Matita sopracciglia" - BROW_GEL = "Eyebrow gel|Gel sopracciglia" - BROW_POWDER = "Eyebrow powder|Polvere sopracciglia" - EYE_PRIMER = "Eye primer|Primer occhi" - FALSE_LASHES = "False eyelashes|Ciglia finte" - LASH_GLUE = "Eyelash glue|Colla ciglia" - BROW_POMADE = "Eyebrow pomade|Pomata sopracciglia" - MATTE_LIPSTICK = "Matte lipstick|Rossetto opaco" - CREAM_LIPSTICK = "Cream lipstick|Rossetto cremoso" - SATIN_LIPSTICK = "Satin lipstick|Rossetto satinato" - LIP_GLOSS = "Lip gloss|Lucidalabbra" - LIP_LINER = "Lip liner|Matita labbra" - LIP_STAIN = "Lip stain|Tinta labbra" - LIP_BALM = "Lip balm|Balsamo labbra" - LIP_PRIMER = "Lip primer|Primer labbra" - LIP_PLUMPER = "Lip plumper|Volumizzante labbra" - LIP_OIL = "Lip oil|Olio labbra" - LIP_MASK = "Lip mask|Maschera labbra" - LIQUID_LIPSTICK = "Liquid lipstick|Rossetto liquido" - GEL_CLEANSER = "Gel cleanser|Detergente gel" - FOAM_CLEANSER = "Foam cleanser|Detergente schiumoso" - OIL_CLEANSER = "Oil cleanser|Detergente oleoso" - CREAM_CLEANSER = "Cream cleanser|Detergente in crema" - MICELLAR_WATER = "Micellar water|Acqua micellare" - TONER = "Toner|Tonico" - ESSENCE = "Essence|Essenza" - SERUM = "Serum|Siero" - MOISTURIZER = "Moisturizer|Idratante" - FACE_OIL = "Face oil|Olio viso" - SHEET_MASK = "Sheet mask|Maschera in tessuto" - CLAY_MASK = "Clay mask|Maschera all'argilla" - GEL_MASK = "Gel mask|Maschera in gel" - CREAM_MASK = "Cream mask|Maschera in crema" - EYE_CREAM = "Eye cream|Crema contorno occhi" - PHYSICAL_EXFOLIATOR = "Physical exfoliator|Esfoliante fisico" - CHEMICAL_EXFOLIATOR = "Chemical 
exfoliator|Esfoliante chimico" - SUNSCREEN = "Sunscreen|Protezione solare" - NIGHT_CREAM = "Night cream|Crema notte" - FACE_MIST = "Face mist|Acqua spray" - SPOT_TREATMENT = "Spot treatment|Trattamento localizzato" - PORE_STRIPS = "Pore strips|Cerotti purificanti" - PEELING_GEL = "Peeling gel|Gel esfoliante" - BASE_COAT = "Base coat|Base smalto" - NAIL_POLISH = "Nail polish|Smalto" - TOP_COAT = "Top coat|Top coat" - CUTICLE_OIL = "Cuticle oil|Olio cuticole" - NAIL_STRENGTHENER = "Nail strengthener|Rinforzante unghie" - QUICK_DRY_DROPS = "Quick dry drops|Gocce asciugatura rapida" - NAIL_PRIMER = "Nail primer|Primer unghie" - GEL_POLISH = "Gel polish|Smalto gel" - ACRYLIC_POWDER = "Acrylic powder|Polvere acrilica" - NAIL_GLUE = "Nail glue|Colla unghie" - MAKEUP_BRUSHES = "Makeup brushes|Pennelli trucco" - MAKEUP_SPONGES = "Makeup sponges|Spugnette trucco" - EYELASH_CURLER = "Eyelash curler|Piegaciglia" - TWEEZERS = "Tweezers|Pinzette" - NAIL_CLIPPERS = "Nail clippers|Tagliaunghie" - NAIL_FILE = "Nail file|Lima unghie" - COTTON_PADS = "Cotton pads|Dischetti di cotone" - MAKEUP_REMOVER_PADS = "Makeup remover pads|Dischetti struccanti" - POWDER_PUFF = "Powder puff|Piumino cipria" - FACIAL_ROLLER = "Facial roller|Rullo facciale" - GUA_SHA = "Gua sha tool|Strumento gua sha" - BRUSH_CLEANER = "Brush cleaner|Detergente pennelli" - MAKEUP_ORGANIZER = "Makeup organizer|Organizzatore trucchi" - MIRROR = "Mirror|Specchio" - NAIL_BUFFER = "Nail buffer|Buffer unghie" - -class NormalUser(TranslatedEnum): - ADULTO = "Adult|Adulto" - BAMBINO = "Child|Bambino" - -class PlaceApplication(TranslatedEnum): - VISO = "Face|Viso" - -class RoutesExposure(TranslatedEnum): - DERMAL = "Dermal|Dermale" - OCULAR = "Ocular|Oculare" - ORAL = "Oral|Orale" - -class NanoRoutes(TranslatedEnum): - DERMAL = "Dermal|Dermale" - OCULAR = "Ocular|Oculare" - ORAL = "Oral|Orale" \ No newline at end of file diff --git a/src/pif_compiler/functions/db_utils.py b/src/pif_compiler/functions/db_utils.py index 
9b1fc58..0b7cce1 100644 --- a/src/pif_compiler/functions/db_utils.py +++ b/src/pif_compiler/functions/db_utils.py @@ -2,6 +2,7 @@ import os from urllib.parse import quote_plus from dotenv import load_dotenv +import psycopg2 from pymongo import MongoClient from pif_compiler.functions.common_log import get_logger @@ -40,9 +41,31 @@ def db_connect(db_name : str = 'toxinfo', collection_name : str = 'substance_ind return collection +def postgres_connect(): + DATABASE_URL = os.getenv("DATABASE_URL") + conn = psycopg2.connect(DATABASE_URL) + return conn + +def insert_compilatore(nome_compilatore): + try: + conn = postgres_connect() + with conn.cursor() as cur: + cur.execute("INSERT INTO compilatori (nome_compilatore) VALUES (%s)", (nome_compilatore,)) + conn.commit() + conn.close() + except Exception as e: + logger.error(f"Error: {e}") + +def log_ricerche(cas, target, esito): + try: + conn = postgres_connect() + with conn.cursor() as cur: + cur.execute("INSERT INTO logs.search_history (cas_ricercato, target, esito) VALUES (%s, %s, %s)", (cas, target, esito)) + conn.commit() + conn.close() + except Exception as e: + logger.error(f"Error: {e}") + return + if __name__ == "__main__": - coll = db_connect() - if coll is not None: - logger.info("Database connection successful.") - else: - logger.error("Database connection failed.") \ No newline at end of file + log_ricerche("123-45-6", "ECHA", True) \ No newline at end of file diff --git a/src/pif_compiler/functions/orders.py b/src/pif_compiler/functions/orders.py deleted file mode 100644 index 68b3969..0000000 --- a/src/pif_compiler/functions/orders.py +++ /dev/null @@ -1,6 +0,0 @@ -from pymongo import MongoClient - -from pif_compiler.functions.common_log import get_logger -from pif_compiler.functions.db_utils import db_connect - -log = get_logger() \ No newline at end of file diff --git a/src/pif_compiler/services/srv_cosing.py b/src/pif_compiler/services/srv_cosing.py index f40adb5..0e8540e 100644 ---
a/src/pif_compiler/services/srv_cosing.py +++ b/src/pif_compiler/services/srv_cosing.py @@ -1,247 +1,141 @@ import json as js import re import requests as req -from typing import Union +from typing import Union, List, Dict, Optional from pif_compiler.functions.common_log import get_logger logger = get_logger() -#region Funzione che processa una lista di CAS presa da Cosing +# --- PARSING --- -def parse_cas_numbers(cas_string:list) -> list: - logger.debug(f"Parsing CAS numbers from input: {cas_string}") - - # Siccome ci assicuriamo esternamente che esista almeno un cas possiamo prendere la stringa - cas_string = cas_string[0] - logger.debug(f"Extracted CAS string: {cas_string}") - - # Rimuoviamo parentesi e il loro contenuto - cas_string = re.sub(r"\([^)]*\)", "", cas_string) - logger.debug(f"After removing parentheses: {cas_string}") - - # Eseguiamo uno split su vari possibili separatori - cas_parts = re.split(r"[/;,]", cas_string) - - # Otteniamo una lista utilizzando i cas precedenti e rimuovendo spazi in eccesso - cas_list = [cas.strip() for cas in cas_parts] - logger.debug(f"CAS list after splitting: {cas_list}") - - # Alcuni cas sono divisi da un doppio dash (--) che dobbiamo rimuovere - # è però necessario farlo ora in seconda battuta +def parse_cas_numbers(cas_string: list) -> list: + logger.debug(f"Parsing CAS numbers: {cas_string}") + + cas_raw = cas_string[0] + cas_raw = re.sub(r"\([^)]*\)", "", cas_raw) + + cas_parts = re.split(r"[/;,]", cas_raw) + cas_list = [cas.strip() for cas in cas_parts if cas.strip()] if len(cas_list) == 1 and "--" in cas_list[0]: - logger.debug("Found double dash separator, splitting further") cas_list = [cas.strip() for cas in cas_list[0].split("--")] - # Siccome alcuni cas hanno valori non validi ("-") li cerchiamo e rimuoviamo - if cas_list: - - while "-" in cas_list: - logger.debug("Removing invalid CAS value: '-'") - cas_list.remove("-") - + cas_list = [cas for cas in cas_list if cas != "-"] + logger.info(f"Parsed CAS 
numbers: {cas_list}") return cas_list -#endregion -#region Funzione per eseguire una ricerca direttamente sul cosing +# --- SEARCH --- -# Il primo argomento è la stringa da cercare, il secondo indica il tipo di ricerca -def cosing_search(text : str, mode : str = "name") -> Union[list,dict,None]: +def cosing_search(text: str, mode: str = "name") -> Union[list, dict, None]: logger.info(f"Starting COSING search: text='{text}', mode='{mode}'") - cosing_post_req = "https://api.tech.ec.europa.eu/search-api/prod/rest/search?apiKey=285a77fd-1257-4271-8507-f0c6b2961203&text=*&pageSize=100&pageNumber=1" + url = "https://api.tech.ec.europa.eu/search-api/prod/rest/search?apiKey=285a77fd-1257-4271-8507-f0c6b2961203&text=*&pageSize=100&pageNumber=1" agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0" - # La modalità di ricerca base è quella per nome, che sia INCI o di altro tipo if mode == "name": - logger.debug("Search mode: name (INCI, chemical name, etc.)") - search_query = {"bool": - {"must":[ - {"text": - {"query":f"{text}","fields": - ["inciName.exact","inciUsaName","innName.exact","phEurName","chemicalName","chemicalDescription"], - "defaultOperator":"AND"}}]}} - - # In caso di ricerca per numero cas o EC il payload della richiesta è diverso - elif mode in ["cas","ec"]: - logger.debug(f"Search mode: {mode}") - search_query = {"bool": {"must": [{"text": {"query": f"*{text}*","fields": ["casNo", "ecNo"]}}]}} - - # La ricerca per ID è necessaria dove serva recuperare gli identified ingredients + search_query = { + "bool": { + "must": [{ + "text": { + "query": f"{text}", + "fields": ["inciName.exact", "inciUsaName", "innName.exact", "phEurName", "chemicalName", "chemicalDescription"], + "defaultOperator": "AND" + } + }] + } + } + elif mode in ["cas", "ec"]: + search_query = {"bool": {"must": [{"text": {"query": f"*{text}*", "fields": ["casNo", "ecNo"]}}]}} elif mode == "id": - 
logger.debug("Search mode: substance ID") - search_query = {"bool":{"must":[{"term":{"substanceId":f"{text}"}}]}} - - # Se la mode inserita non è prevista lancio un errore + search_query = {"bool": {"must": [{"term": {"substanceId": f"{text}"}}]}} else: logger.error(f"Invalid search mode: {mode}") raise ValueError(f"Invalid search mode: {mode}") - # Creo il payload della mia request - files = {"query": ("query",js.dumps(search_query),"application/json")} - logger.debug(f"Search query: {search_query}") + files = {"query": ("query", js.dumps(search_query), "application/json")} - # Eseguo la post di ricerca try: - logger.debug(f"Sending POST request to COSING API") - risposta = req.post(cosing_post_req,headers={"user_agent":agent,"Connection":"keep-alive"},files=files) + risposta = req.post(url, headers={"user_agent": agent, "Connection": "keep-alive"}, files=files) risposta.raise_for_status() - risposta = risposta.json() + data = risposta.json() - if risposta["results"]: - logger.info(f"COSING search successful: found {len(risposta['results'])} result(s)") - logger.debug(f"First result substance ID: {risposta['results'][0]['metadata'].get('substanceId', 'N/A')}") - return risposta["results"][0]["metadata"] + if data.get("results"): + logger.info(f"COSING search successful") + return data["results"][0]["metadata"] else: - # La funzione ritorna None quando non ho risultati dalla mia ricerca - logger.warning(f"COSING search returned no results for text='{text}', mode='{mode}'") + logger.warning(f"No results found") return None except req.exceptions.RequestException as e: - logger.error(f"HTTP request error during COSING search: {e}") + logger.error(f"HTTP request error: {e}") raise except (KeyError, ValueError, TypeError) as e: - logger.error(f"Error parsing COSING response: {e}") + logger.error(f"Parsing error: {e}") raise -#endregion -#region Funzione per pulire un json cosing e restituirlo +# --- CLEANING --- -def clean_cosing(json : dict, full : bool = True) -> 
dict: - substance_id = json.get("substanceId", ["Unknown"])[0] if json.get("substanceId") else "Unknown" - logger.info(f"Cleaning COSING data for substance ID: {substance_id}, full={full}") - - # Definisco i campi di nostro interesse e li divido in base al tipo di output finale che desidero +def clean_cosing(json_data: dict, full: bool = True) -> dict: + substance_id = json_data.get("substanceId", ["Unknown"])[0] if json_data.get("substanceId") else "Unknown" + logger.info(f"Cleaning COSING data for: {substance_id}") string_cols = [ - "itemType", - "nameOfCommonIngredientsGlossary", - "inciName", - "phEurName", - "chemicalName", - "innName", - "substanceId", - "refNo" - ] + "itemType", "phEurName", "chemicalName", "innName", "substanceId", "cosmeticRestriction" + ] list_cols = [ - "casNo", - "ecNo", - "functionName", - "otherRestrictions", - "sccsOpinion", - "sccsOpinionUrls", - "identifiedIngredient", - "annexNo", - "otherRegulations" - ] - - # Creo una lista con tutti i campi su cui ciclare - - total_keys = string_cols + list_cols - - # Definisco la base dell"url che mi serve per ottenere il link cosing della sostanza + "casNo", "ecNo", "functionName", "otherRestrictions", "refNo", + "sccsOpinion", "sccsOpinionUrls", "identifiedIngredient", + "annexNo", "otherRegulations", "nameOfCommonIngredientsGlossary", "inciName" + ] base_url = "https://ec.europa.eu/growth/tools-databases/cosing/details/" clean_json = {} - # Ciclo su tutti i campi di interesse - - for key in total_keys: - - # Alcuni campi contengono una dicitura inutile che occupa solo spazio - # per cui provvedo a rimuoverla - - while "" in json[key]: - json[key].remove("") - - # Se il campo dovrà avere in output una lista, posso anche accettare le liste vuote del cosing come valore + for key in (string_cols + list_cols): + current_val = json_data.get(key, []) + filtered_val = [v for v in current_val if v != ""] if key in list_cols: - value = json[key] - - # Il cas e l"ec sono casi speciali, per cui eseguo 
delle funzioni in più quando lo tratto - - if key in ["casNo", "ecNo"]: - if value: - logger.debug(f"Processing {key}: {value}") - value = parse_cas_numbers(value) - - # Completiamo direttamente il json di ritorno ove presenti degli identifiedIngredient, - # solo dove il flag "full" è true - - elif key == "identifiedIngredient": - if full: - if value: - logger.debug(f"Processing {len(value)} identified ingredient(s)") - value = identified_ingredients(value) - - clean_json[key] = value - + if key in ["casNo", "ecNo"] and filtered_val: + filtered_val = parse_cas_numbers(filtered_val) + elif key == "identifiedIngredient" and full and filtered_val: + filtered_val = identified_ingredients(filtered_val) + + clean_json[key] = filtered_val else: + nKey = "commonName" if key == "nameOfCommonIngredientsGlossary" else key + clean_json[nKey] = filtered_val[0] if filtered_val else "" - # Questo nome di campo era troppo lungo e ho preferito semplificarlo - - if key == "nameOfCommonIngredientsGlossary": - nKey = "commonName" - - # Dovendo rinominare alcuni campi, negli altri casi copio il nome iniziale - - else: - nKey = key - - # Siccome voglio in output una stringa semplice e se usassi uno slicer su di una lista vuota - # devo prima verificare che la lista cosing contenga dei valori - - if json[key]: - clean_json[nKey] = json[key][0] - else: - clean_json[nKey] = "" - - # Il campo cosingUrl non esiste ancora, vado a crearlo unendo il substance ID all"url base - - clean_json["cosingUrl"] = f"{base_url}{json["substanceId"][0]}" - logger.debug(f"Generated COSING URL: {clean_json['cosingUrl']}") - logger.info(f"Successfully cleaned COSING data for substance ID: {substance_id}") - + clean_json["cosingUrl"] = f"{base_url}{json_data['substanceId'][0]}" return clean_json -#endregion - -#region Funzione per completare, se necessario, un json cosing - -def identified_ingredients(id_list : list) -> list: - logger.info(f"Processing {len(id_list)} identified ingredient(s): {id_list}") +def 
identified_ingredients(id_list: list) -> list: + logger.info(f"Processing identified ingredients: {id_list}") identified = [] - # Per ognuno degli ingredienti in IdentifiedIngredients eseguo una ricerca - - for id in id_list: - logger.debug(f"Searching for identified ingredient with ID: {id}") - - ingredient = cosing_search(id,"id") - + for sub_id in id_list: + ingredient = cosing_search(sub_id, "id") if ingredient: - - # Vado a pulire i json appena trovati - - ingredient = clean_cosing(ingredient,full=False) - - # Ora salvo nella lista il documento pulito - - identified.append(ingredient) - logger.debug(f"Successfully added identified ingredient ID: {id}") - else: - logger.warning(f"Could not find identified ingredient with ID: {id}") - - # Finito di popolare la lista con gli oggetti degli identifiedIngredient, la ritorno - - logger.info(f"Successfully processed {len(identified)} of {len(id_list)} identified ingredient(s)") + identified.append(clean_cosing(ingredient, full=False)) + return identified -#endregion + +def cosing_entry(cas: str) -> Optional[dict]: + logger.info(f"Retrieving COSING entry for CAS: {cas}") + try: + search_result = cosing_search(cas, mode="cas") + if search_result: + return clean_cosing(search_result) + else: + logger.warning(f"No COSING entry found for CAS: {cas}") + return None + except Exception as e: + logger.error(f"Error retrieving COSING entry for CAS {cas}: {e}") + return None if __name__ == "__main__": - print(cosing_search("Water","name")) - - \ No newline at end of file + raw = cosing_search("72-48-0", "cas") + clean = clean_cosing(raw) + print(clean) \ No newline at end of file diff --git a/src/pif_compiler/services/srv_echa.py b/src/pif_compiler/services/srv_echa.py index 57d2b85..9262956 100644 --- a/src/pif_compiler/services/srv_echa.py +++ b/src/pif_compiler/services/srv_echa.py @@ -6,9 +6,10 @@ import re from bs4 import BeautifulSoup from dotenv import load_dotenv from playwright.sync_api import sync_playwright +from 
typing import Callable, Any from pif_compiler.functions.common_log import get_logger -from pif_compiler.functions.db_utils import db_connect +from pif_compiler.functions.db_utils import db_connect, log_ricerche log = get_logger() load_dotenv() @@ -311,6 +312,114 @@ def parse_toxicology_html(html_content): return result + +def parse_value_with_unit(value_str: str) -> tuple: + """Parse a combined value+unit string like '5,040mg/kg bw/day' into (value, unit).""" + if not value_str: + return ("", "") + + # Pattern to match numeric value (with commas/decimals) followed by unit + match = re.match(r'^([\d,\.]+)\s*(.*)$', value_str.strip()) + if match: + numeric_value = match.group(1).replace(',', '') # Remove commas + unit = match.group(2).strip() + return (numeric_value, unit) + return (value_str, "") + + +def extract_levels(data: dict | list, extractor: Callable[[dict, list], dict | None]) -> dict: + """ + Generic function to recursively extract data from nested JSON structures. + + Args: + data: The JSON data (dict or list) to parse + extractor: A callable that receives (obj, path) and returns: + - A dict with extracted data if the object matches criteria + - None if no match (continue searching) + The 'path' is a list of labels encountered in the hierarchy. + + Returns: + A dict keyed by context path (labels joined by " > "), with values + being whatever the extractor returns. 
+ + Example: + def my_extractor(obj, path): + if obj.get("SomeField"): + return {"field": obj["SomeField"]} + return None + + results = extract_levels(data, my_extractor) + """ + results = {} + + def recurse(obj: Any, path: list = None): + if path is None: + path = [] + + if isinstance(obj, dict): + # Check for label to use in path + current_label = obj.get("label", "") + current_path = path + [current_label] if current_label else path + + # Call the extractor function + extracted = extractor(obj, current_path) + if extracted is not None: + key = " > ".join(filter(None, current_path)) or "root" + results[key] = extracted + + # Recurse into all values + for k, val in obj.items(): + if k != "label": + recurse(val, current_path) + + elif isinstance(obj, list): + for item in obj: + recurse(item, path) + + recurse(data) + return results + + +def rdt_extractor(obj: dict, path: list) -> dict | None: + indicator = obj.get("EffectLevelUnit", "").strip() + value_str = obj.get("EffectLevelValue", "").strip() + + if indicator and value_str: + numeric_value, unit = parse_value_with_unit(value_str) + + # Extract route (last label) and toxicity_type (second-to-last label) + filtered_path = [p for p in path if p] + route = filtered_path[-1] if len(filtered_path) >= 1 else "" + toxicity_type = filtered_path[-2] if len(filtered_path) >= 2 else "" + + return { + "indicator": indicator, + "value": numeric_value, + "unit": unit, + "route": route, + "toxicity_type": toxicity_type + } + return None + +def at_extractor(obj: dict, path: list) -> dict | None: + indicator = obj.get("EffectLevelUnit", "").strip() + value_str = obj.get("EffectLevelValue", "").strip() + + if indicator and value_str: + numeric_value, unit = parse_value_with_unit(value_str) + + filtered_path = [p for p in path if p] + route = filtered_path[-1] if len(filtered_path) >= 1 else "" + route = route.replace("Acute toxicity: via ", "").strip() + + return { + "indicator": indicator, + "value": numeric_value, + "unit": 
unit, + "route": route + } + return None + #endregion #region Orchestrator functions @@ -418,16 +527,19 @@ def orchestrator(cas: str) -> dict: local_record = check_local(cas_validated) if local_record: log.info(f"Returning local record for CAS {cas}.") + log_ricerche(cas, 'ECHA', True) return local_record else: log.info(f"No local record, starting echa flow") echa_data = echa_flow(cas_validated) if echa_data: log.info(f"Echa flow successful") + log_ricerche(cas, 'ECHA', True) add_to_local(echa_data) return echa_data else: log.error(f"Failed to retrieve ECHA data for CAS {cas}.") + log_ricerche(cas, 'ECHA', False) return None # to do: check if document is complete @@ -436,5 +548,4 @@ def orchestrator(cas: str) -> dict: if __name__ == "__main__": cas_test = "113170-55-1" - result = orchestrator(cas_test) - print(result) \ No newline at end of file + result = orchestrator(cas_test) \ No newline at end of file diff --git a/tests/README.md b/tests/README.md deleted file mode 100644 index adc226c..0000000 --- a/tests/README.md +++ /dev/null @@ -1,220 +0,0 @@ -# PIF Compiler - Test Suite - -## Overview - -Comprehensive test suite for the PIF Compiler project using `pytest`. 
- -## Structure - -``` -tests/ -├── __init__.py # Test package marker -├── conftest.py # Shared fixtures and configuration -├── test_cosing_service.py # COSING service tests -├── test_models.py # (TODO) Pydantic model tests -├── test_echa_service.py # (TODO) ECHA service tests -└── README.md # This file -``` - -## Installation - -```bash -# Install test dependencies -uv add --dev pytest pytest-cov pytest-mock - -# Or manually install -uv pip install pytest pytest-cov pytest-mock -``` - -## Running Tests - -### Run All Tests (Unit only) -```bash -uv run pytest -``` - -### Run Specific Test File -```bash -uv run pytest tests/test_cosing_service.py -``` - -### Run Specific Test Class -```bash -uv run pytest tests/test_cosing_service.py::TestParseCasNumbers -``` - -### Run Specific Test -```bash -uv run pytest tests/test_cosing_service.py::TestParseCasNumbers::test_single_cas_number -``` - -### Run with Verbose Output -```bash -uv run pytest -v -``` - -### Run with Coverage Report -```bash -uv run pytest --cov=src/pif_compiler --cov-report=html -# Open htmlcov/index.html in browser -``` - -## Test Categories - -### Unit Tests (Default) -Fast tests with no external dependencies. Run by default. - -```bash -uv run pytest -m unit -``` - -### Integration Tests -Tests that hit real APIs or databases. Skipped by default. - -```bash -uv run pytest -m integration -``` - -### Slow Tests -Tests that take longer to run. Skipped by default. - -```bash -uv run pytest -m slow -``` - -### Database Tests -Tests requiring MongoDB. Ensure Docker is running. 
- -```bash -cd utils -docker-compose up -d -uv run pytest -m database -``` - -## Test Organization - -### `test_cosing_service.py` - -**Coverage:** -- ✅ `parse_cas_numbers()` - CAS parsing logic - - Single/multiple CAS - - Different separators (/, ;, ,, --) - - Parentheses removal - - Whitespace handling - - Invalid dash removal - -- ✅ `cosing_search()` - API search - - Search by name - - Search by CAS - - Search by EC number - - Search by ID - - No results handling - - Invalid mode error - -- ✅ `clean_cosing()` - JSON cleaning - - Basic field cleaning - - Empty tag removal - - CAS parsing - - URL creation - - Field renaming - -- ✅ Integration tests (marked as `@pytest.mark.integration`) - - Real API calls (requires internet) - -## Writing New Tests - -### Example Unit Test - -```python -class TestMyFunction: - """Test my_function.""" - - def test_basic_case(self): - """Test basic functionality.""" - result = my_function("input") - assert result == "expected" - - def test_edge_case(self): - """Test edge case handling.""" - with pytest.raises(ValueError): - my_function("invalid") -``` - -### Example Mock Test - -```python -from unittest.mock import Mock, patch - -@patch('module.external_api_call') -def test_with_mock(mock_api): - """Test with mocked external call.""" - mock_api.return_value = {"data": "mocked"} - result = my_function() - assert result == "expected" - mock_api.assert_called_once() -``` - -### Example Fixture Usage - -```python -def test_with_fixture(sample_cosing_response): - """Test using a fixture from conftest.py.""" - result = clean_cosing(sample_cosing_response) - assert "cosingUrl" in result -``` - -## Best Practices - -1. **Naming**: Test files/classes/functions start with `test_` -2. **Arrange-Act-Assert**: Structure tests clearly -3. **One assertion focus**: Each test should test one thing -4. **Use fixtures**: Reuse test data via `conftest.py` -5. **Mock external calls**: Don't hit real APIs in unit tests -6. 
**Mark appropriately**: Use `@pytest.mark.integration` for slow tests -7. **Descriptive names**: Test names should describe what they test - -## Common Commands - -```bash -# Run fast tests only (skip integration/slow) -uv run pytest -m "not integration and not slow" - -# Run only integration tests -uv run pytest -m integration - -# Run with detailed output -uv run pytest -vv - -# Stop at first failure -uv run pytest -x - -# Run last failed tests -uv run pytest --lf - -# Run tests matching pattern -uv run pytest -k "test_parse" - -# Generate coverage report -uv run pytest --cov=src/pif_compiler --cov-report=term-missing -``` - -## CI/CD Integration - -For GitHub Actions (example): - -```yaml -- name: Run tests - run: | - uv run pytest -m "not integration" --cov --cov-report=xml -``` - -## TODO - -- [ ] Add tests for `models.py` (Pydantic validation) -- [ ] Add tests for `echa_service.py` -- [ ] Add tests for `echa_parser.py` -- [ ] Add tests for `echa_extractor.py` -- [ ] Add tests for `database_service.py` -- [ ] Add tests for `pubchem_service.py` -- [ ] Add integration tests with test database -- [ ] Set up GitHub Actions CI diff --git a/tests/RUN_TESTS.md b/tests/RUN_TESTS.md deleted file mode 100644 index e559c63..0000000 --- a/tests/RUN_TESTS.md +++ /dev/null @@ -1,86 +0,0 @@ -# Quick Start - Running Tests - -## 1. Install Test Dependencies - -```bash -# Add pytest and related tools -uv add --dev pytest pytest-cov pytest-mock -``` - -## 2. Run the Tests - -```bash -# Run all unit tests (fast, no API calls) -uv run pytest - -# Run with more detail -uv run pytest -v - -# Run just the COSING tests -uv run pytest tests/test_cosing_service.py - -# Run integration tests (will hit real COSING API) -uv run pytest -m integration -``` - -## 3. 
See Coverage - -```bash -# Generate HTML coverage report -uv run pytest --cov=src/pif_compiler --cov-report=html - -# Open htmlcov/index.html in your browser -``` - -## What the Tests Cover - -### ✅ `parse_cas_numbers()` -- Parses single CAS: `["7732-18-5"]` → `["7732-18-5"]` -- Parses multiple: `["7732-18-5/56-81-5"]` → `["7732-18-5", "56-81-5"]` -- Handles separators: `/`, `;`, `,`, `--` -- Removes parentheses: `["7732-18-5 (hydrate)"]` → `["7732-18-5"]` -- Cleans whitespace and invalid dashes - -### ✅ `cosing_search()` -- Mocks API calls (no internet needed for unit tests) -- Tests search by name, CAS, EC, ID -- Tests error handling -- Integration tests hit real API - -### ✅ `clean_cosing()` -- Cleans COSING JSON responses -- Removes empty tags -- Parses CAS numbers -- Creates COSING URLs -- Renames fields - -## Test Results Example - -``` -tests/test_cosing_service.py::TestParseCasNumbers::test_single_cas_number PASSED -tests/test_cosing_service.py::TestParseCasNumbers::test_multiple_cas_with_slash PASSED -tests/test_cosing_service.py::TestCosingSearch::test_search_by_name_success PASSED -... -================================ 25 passed in 0.5s ================================ -``` - -## Troubleshooting - -### Import errors -Make sure you're in the project root: -```bash -cd c:\Users\adish\Projects\pif_compiler -uv run pytest -``` - -### Mock not found -Install pytest-mock: -```bash -uv add --dev pytest-mock -``` - -### Integration tests failing -These hit the real API and need internet. Skip them: -```bash -uv run pytest -m "not integration" -``` diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index b79213c..0000000 --- a/tests/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -PIF Compiler - Test Suite -""" diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index d1f14b2..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,247 +0,0 @@ -""" -Pytest configuration and fixtures for PIF Compiler tests. 
- -This file contains shared fixtures and configuration for all tests. -""" - -import pytest -import sys -from pathlib import Path - -# Add src to Python path for imports -src_path = Path(__file__).parent.parent / "src" -sys.path.insert(0, str(src_path)) - - -# Sample data fixtures -@pytest.fixture -def sample_cas_numbers(): - """Real CAS numbers for testing common cosmetic ingredients.""" - return { - "water": "7732-18-5", - "glycerin": "56-81-5", - "sodium_hyaluronate": "9067-32-7", - "niacinamide": "98-92-0", - "ascorbic_acid": "50-81-7", - "retinol": "68-26-8", - "lanolin": "85507-69-3", - "sodium_chloride": "7647-14-5", - "propylene_glycol": "57-55-6", - "butylene_glycol": "107-88-0", - "salicylic_acid": "69-72-7", - "tocopherol": "59-02-9", - "caffeine": "58-08-2", - "citric_acid": "77-92-9", - "hyaluronic_acid": "9004-61-9", - "sodium_hyaluronate_crosspolymer": "63148-62-9", - "zinc_oxide": "1314-13-2", - "titanium_dioxide": "13463-67-7", - "lactic_acid": "50-21-5", - "lanolin_oil": "8006-54-0", - } - - -@pytest.fixture -def sample_cosing_response(): - """Sample COSING API response for testing.""" - return { - "inciName": ["WATER"], - "casNo": ["7732-18-5"], - "ecNo": ["231-791-2"], - "substanceId": ["12345"], - "itemType": ["Ingredient"], - "functionName": ["Solvent"], - "chemicalName": ["Dihydrogen monoxide"], - "nameOfCommonIngredientsGlossary": ["Water"], - "sccsOpinion": [], - "sccsOpinionUrls": [], - "otherRestrictions": [], - "identifiedIngredient": [], - "annexNo": [], - "otherRegulations": [], - "refNo": ["REF123"], - "phEurName": [], - "innName": [] - } - - -@pytest.fixture -def sample_ingredient_data(): - """Sample ingredient data for Pydantic model testing.""" - return { - "inci_name": "WATER", - "cas": "7732-18-5", - "quantity": 70.0, - "mol_weight": 18, - "dap": 0.5, - } - - -@pytest.fixture -def sample_pif_data(): - """Sample PIF data for testing.""" - return { - "company": "Beauty Corp", - "product_name": "Face Cream", - "type": 
"MOISTURIZER", - "physical_form": "CREMA", - "CNCP": 123456, - "production_company": { - "prod_company_name": "Manufacturer Inc", - "prod_vat": 12345678, - "prod_address": "123 Main St, City, Country" - }, - "ingredients": [ - { - "inci_name": "WATER", - "cas": "7732-18-5", - "quantity": 70.0, - "dap": 0.5 - }, - { - "inci_name": "GLYCERIN", - "cas": "56-81-5", - "quantity": 10.0, - "dap": 0.5 - } - ] - } - - -@pytest.fixture -def sample_echa_substance_response(): - """Sample ECHA substance search API response for glycerin.""" - return { - "items": [{ - "substanceIndex": { - "rmlId": "100.029.181", - "rmlName": "glycerol", - "rmlCas": "56-81-5", - "rmlEc": "200-289-5" - } - }] - } - - -@pytest.fixture -def sample_echa_substance_response_water(): - """Sample ECHA substance search API response for water.""" - return { - "items": [{ - "substanceIndex": { - "rmlId": "100.028.902", - "rmlName": "water", - "rmlCas": "7732-18-5", - "rmlEc": "231-791-2" - } - }] - } - - -@pytest.fixture -def sample_echa_substance_response_niacinamide(): - """Sample ECHA substance search API response for niacinamide.""" - return { - "items": [{ - "substanceIndex": { - "rmlId": "100.002.530", - "rmlName": "nicotinamide", - "rmlCas": "98-92-0", - "rmlEc": "202-713-4" - } - }] - } - - -@pytest.fixture -def sample_echa_dossier_response(): - """Sample ECHA dossier list API response.""" - return { - "items": [{ - "assetExternalId": "abc123def456", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - -@pytest.fixture -def sample_echa_index_html_full(): - """Sample ECHA index.html with all toxicology sections.""" - return """ - - ECHA Dossier - -
- -
-
- -
-
- -
- - - """ - - -@pytest.fixture -def sample_echa_index_html_partial(): - """Sample ECHA index.html with only ToxSummary section.""" - return """ - - ECHA Dossier - -
- -
- - - """ - - -@pytest.fixture -def sample_echa_index_html_empty(): - """Sample ECHA index.html with no toxicology sections.""" - return """ - - ECHA Dossier - -

No toxicology information available

- - - """ - - -# Skip markers -def pytest_configure(config): - """Configure custom markers.""" - config.addinivalue_line( - "markers", "unit: mark test as a unit test (fast, no external deps)" - ) - config.addinivalue_line( - "markers", "integration: mark test as integration test (may use real APIs)" - ) - config.addinivalue_line( - "markers", "slow: mark test as slow (skip by default)" - ) - config.addinivalue_line( - "markers", "database: mark test as requiring database" - ) - - -def pytest_collection_modifyitems(config, items): - """Modify test collection to skip slow/integration tests by default.""" - skip_slow = pytest.mark.skip(reason="Slow test (use -m slow to run)") - skip_integration = pytest.mark.skip(reason="Integration test (use -m integration to run)") - - # Only skip if not explicitly requested - run_slow = config.getoption("-m") == "slow" - run_integration = config.getoption("-m") == "integration" - - for item in items: - if "slow" in item.keywords and not run_slow: - item.add_marker(skip_slow) - if "integration" in item.keywords and not run_integration: - item.add_marker(skip_integration) diff --git a/tests/test_cosing_service.py b/tests/test_cosing_service.py deleted file mode 100644 index 6b9ef38..0000000 --- a/tests/test_cosing_service.py +++ /dev/null @@ -1,254 +0,0 @@ -""" -Tests for COSING Service - -Test coverage: -- parse_cas_numbers: CAS number parsing logic -- cosing_search: API search functionality -- clean_cosing: JSON cleaning and formatting -""" - -import pytest -from unittest.mock import Mock, patch -from pif_compiler.services.srv_cosing import ( - parse_cas_numbers, - cosing_search, - clean_cosing, -) - - -class TestParseCasNumbers: - """Test CAS number parsing function.""" - - def test_single_cas_number(self): - """Test parsing a single CAS number.""" - result = parse_cas_numbers(["7732-18-5"]) - assert result == ["7732-18-5"] - - def test_multiple_cas_with_slash(self): - """Test parsing multiple CAS numbers separated by slash.""" - 
result = parse_cas_numbers(["7732-18-5/56-81-5"]) - assert result == ["7732-18-5", "56-81-5"] - - def test_multiple_cas_with_semicolon(self): - """Test parsing multiple CAS numbers separated by semicolon.""" - result = parse_cas_numbers(["7732-18-5;56-81-5"]) - assert result == ["7732-18-5", "56-81-5"] - - def test_multiple_cas_with_comma(self): - """Test parsing multiple CAS numbers separated by comma.""" - result = parse_cas_numbers(["7732-18-5,56-81-5"]) - assert result == ["7732-18-5", "56-81-5"] - - def test_double_dash_separator(self): - """Test parsing CAS numbers with double dash separator.""" - result = parse_cas_numbers(["7732-18-5--56-81-5"]) - assert result == ["7732-18-5", "56-81-5"] - - def test_cas_with_parentheses(self): - """Test that parenthetical info is removed.""" - result = parse_cas_numbers(["7732-18-5 (hydrate)"]) - assert result == ["7732-18-5"] - - def test_cas_with_extra_whitespace(self): - """Test that extra whitespace is trimmed.""" - result = parse_cas_numbers([" 7732-18-5 / 56-81-5 "]) - assert result == ["7732-18-5", "56-81-5"] - - def test_removes_invalid_dash(self): - """Test that standalone dashes are removed.""" - result = parse_cas_numbers(["7732-18-5/-/56-81-5"]) - assert result == ["7732-18-5", "56-81-5"] - - def test_complex_mixed_separators(self): - """Test with multiple separator types.""" - result = parse_cas_numbers(["7732-18-5/56-81-5;50-00-0"]) - assert result == ["7732-18-5", "56-81-5", "50-00-0"] - - -class TestCosingSearch: - """Test COSING API search functionality.""" - - @patch('pif_compiler.services.cosing_service.req.post') - def test_search_by_name_success(self, mock_post): - """Test successful search by ingredient name.""" - # Mock API response - mock_response = Mock() - mock_response.json.return_value = { - "results": [{ - "metadata": { - "inciName": ["WATER"], - "casNo": ["7732-18-5"], - "substanceId": ["12345"] - } - }] - } - mock_post.return_value = mock_response - - result = cosing_search("WATER", 
mode="name") - - assert result is not None - assert result["inciName"] == ["WATER"] - assert result["casNo"] == ["7732-18-5"] - - @patch('pif_compiler.services.cosing_service.req.post') - def test_search_by_cas_success(self, mock_post): - """Test successful search by CAS number.""" - mock_response = Mock() - mock_response.json.return_value = { - "results": [{ - "metadata": { - "inciName": ["WATER"], - "casNo": ["7732-18-5"] - } - }] - } - mock_post.return_value = mock_response - - result = cosing_search("7732-18-5", mode="cas") - - assert result is not None - assert "7732-18-5" in result["casNo"] - - @patch('pif_compiler.services.cosing_service.req.post') - def test_search_by_ec_success(self, mock_post): - """Test successful search by EC number.""" - mock_response = Mock() - mock_response.json.return_value = { - "results": [{ - "metadata": { - "ecNo": ["231-791-2"] - } - }] - } - mock_post.return_value = mock_response - - result = cosing_search("231-791-2", mode="ec") - - assert result is not None - assert "231-791-2" in result["ecNo"] - - @patch('pif_compiler.services.cosing_service.req.post') - def test_search_by_id_success(self, mock_post): - """Test successful search by substance ID.""" - mock_response = Mock() - mock_response.json.return_value = { - "results": [{ - "metadata": { - "substanceId": ["12345"] - } - }] - } - mock_post.return_value = mock_response - - result = cosing_search("12345", mode="id") - - assert result is not None - assert result["substanceId"] == ["12345"] - - @patch('pif_compiler.services.cosing_service.req.post') - def test_search_no_results(self, mock_post): - """Test search with no results returns status code.""" - mock_response = Mock() - mock_response.json.return_value = {"results": []} - mock_post.return_value = mock_response - - result = cosing_search("NONEXISTENT", mode="name") - assert result == None # Should return None - - def test_search_invalid_mode(self): - """Test that invalid mode raises ValueError.""" - with 
pytest.raises(ValueError): - cosing_search("WATER", mode="invalid_mode") - - -class TestCleanCosing: - """Test COSING JSON cleaning function.""" - - def test_clean_basic_fields(self, sample_cosing_response): - """Test cleaning basic string and list fields.""" - - result = clean_cosing(sample_cosing_response, full=False) - - assert result["inciName"] == "WATER" - assert result["casNo"] == ["7732-18-5"] - assert result["ecNo"] == ["231-791-2"] - - def test_removes_empty_tags(self, sample_cosing_response): - """Test that tags are removed.""" - - sample_cosing_response["inciName"] = [""] - sample_cosing_response["functionName"] = [""] - - result = clean_cosing(sample_cosing_response, full=False) - - assert "" not in result["inciName"] - assert result["functionName"] == [] - - def test_parses_cas_numbers(self, sample_cosing_response): - """Test that CAS numbers are parsed correctly.""" - sample_cosing_response["casNo"] = ["56-81-5"] - - result = clean_cosing(sample_cosing_response, full=False) - - assert result["casNo"] == ["56-81-5"] - - def test_creates_cosing_url(self, sample_cosing_response): - """Test that COSING URL is created.""" - result = clean_cosing(sample_cosing_response, full=False) - - assert "cosingUrl" in result - assert "12345" in result["cosingUrl"] - assert result["cosingUrl"] == "https://ec.europa.eu/growth/tools-databases/cosing/details/12345" - - def test_renames_common_name(self, sample_cosing_response): - """Test that nameOfCommonIngredientsGlossary is renamed.""" - result = clean_cosing(sample_cosing_response, full=False) - - assert "commonName" in result - assert result["commonName"] == "Water" - assert "nameOfCommonIngredientsGlossary" not in result - - def test_empty_lists_handled(self, sample_cosing_response): - """Test that empty lists are handled correctly.""" - sample_cosing_response["inciName"] = [] - sample_cosing_response["casNo"] = [] - - result = clean_cosing(sample_cosing_response, full=False) - - assert result["inciName"] == "" - 
assert result["casNo"] == [] - - -class TestIntegration: - """Integration tests with real API (marked as slow).""" - - @pytest.mark.integration - def test_real_water_search(self): - """Test real API call for WATER (requires internet).""" - result = cosing_search("WATER", mode="name") - - if result and isinstance(result, dict): - # Real API call succeeded - assert "inciName" in result or "casNo" in result - - @pytest.mark.integration - def test_real_cas_search(self): - """Test real API call by CAS number (requires internet).""" - result = cosing_search("56-81-5", mode="cas") - - if result and isinstance(result, dict): - assert "casNo" in result - - @pytest.mark.integration - def test_full_workflow(self): - """Test complete workflow: search -> clean.""" - # Search for glycerin - raw_result = cosing_search("GLYCERIN", mode="name") - - if raw_result and isinstance(raw_result, dict): - # Clean the result - clean_result = clean_cosing(raw_result, full=False) - - # Verify cleaned structure - assert "cosingUrl" in clean_result - assert isinstance(clean_result.get("casNo"), list) diff --git a/tests/test_echa_find.py b/tests/test_echa_find.py deleted file mode 100644 index c5f63b6..0000000 --- a/tests/test_echa_find.py +++ /dev/null @@ -1,857 +0,0 @@ -""" -Tests for ECHA Find Service - -Test coverage: -- search_dossier: Complete workflow for searching ECHA dossiers - - Substance search (by CAS, EC, rmlName) - - Dossier retrieval (Active/Inactive) - - HTML parsing for toxicology sections - - Error handling and edge cases -""" - -import pytest -from unittest.mock import Mock, patch, MagicMock -from datetime import datetime -from pif_compiler.services.echa_find import search_dossier - - -class TestSearchDossierSubstanceSearch: - """Test the initial substance search phase of search_dossier.""" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_successful_cas_search(self, mock_get): - """Test successful search by CAS number.""" - # Mock the substance search API 
response - mock_response = Mock() - mock_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - mock_get.return_value = mock_response - - # Mocking all subsequent calls - with patch('pif_compiler.services.echa_find.requests.get') as mock_all_gets: - # First call: substance search (already mocked above) - # Second call: dossier list - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - # Third call: index.html page - mock_index_response = Mock() - mock_index_response.text = """ - -
- -
-
- -
-
- -
- - """ - - mock_all_gets.side_effect = [ - mock_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert result["rmlCas"] == "50-00-0" - assert result["rmlName"] == "Test Substance" - assert result["rmlId"] == "100.000.001" - assert result["rmlEc"] == "200-001-8" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_successful_ec_search(self, mock_get): - """Test successful search by EC number.""" - mock_response = Mock() - mock_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - mock_get.return_value = mock_response - - with patch('pif_compiler.services.echa_find.requests.get') as mock_all_gets: - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "
" - - mock_all_gets.side_effect = [ - mock_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("200-001-8", input_type="rmlEc") - - assert result is not False - assert result["rmlEc"] == "200-001-8" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_successful_name_search(self, mock_get): - """Test successful search by substance name.""" - mock_response = Mock() - mock_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "formaldehyde", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - mock_get.return_value = mock_response - - with patch('pif_compiler.services.echa_find.requests.get') as mock_all_gets: - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "
" - - mock_all_gets.side_effect = [ - mock_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("formaldehyde", input_type="rmlName") - - assert result is not False - assert result["rmlName"] == "formaldehyde" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_substance_not_found(self, mock_get): - """Test when substance is not found in ECHA.""" - mock_response = Mock() - mock_response.json.return_value = {"items": []} - mock_get.return_value = mock_response - - result = search_dossier("999-99-9", input_type="rmlCas") - - assert result is False - - @patch('pif_compiler.services.echa_find.requests.get') - def test_empty_items_array(self, mock_get): - """Test when API returns empty items array.""" - mock_response = Mock() - mock_response.json.return_value = {"items": []} - mock_get.return_value = mock_response - - result = search_dossier("NONEXISTENT", input_type="rmlName") - - assert result is False - - @patch('pif_compiler.services.echa_find.requests.get') - def test_malformed_api_response(self, mock_get): - """Test when API response is malformed.""" - mock_response = Mock() - mock_response.json.return_value = {} # Missing 'items' key - mock_get.return_value = mock_response - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is False - - -class TestSearchDossierInputTypeValidation: - """Test input_type parameter validation.""" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_input_type_mismatch_cas(self, mock_get): - """Test when input_type doesn't match actual search result (CAS).""" - mock_response = Mock() - mock_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - mock_get.return_value = mock_response - - # Search with CAS but specify wrong input_type - result = search_dossier("50-00-0", input_type="rmlEc") - - assert 
isinstance(result, str) - assert "search_error" in result - assert "not equal" in result - - @patch('pif_compiler.services.echa_find.requests.get') - def test_input_type_correct_match(self, mock_get): - """Test when input_type correctly matches search result.""" - mock_response = Mock() - mock_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - mock_get.return_value = mock_response - - with patch('pif_compiler.services.echa_find.requests.get') as mock_all_gets: - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "
" - - mock_all_gets.side_effect = [ - mock_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert isinstance(result, dict) - - -class TestSearchDossierDossierRetrieval: - """Test dossier retrieval (Active/Inactive).""" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_active_dossier_found(self, mock_get): - """Test when active dossier is found.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "
" - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert result["dossierType"] == "Active" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_inactive_dossier_fallback(self, mock_get): - """Test when only inactive dossier exists (fallback).""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - # First dossier call returns empty (no active) - mock_active_dossier_response = Mock() - mock_active_dossier_response.json.return_value = {"items": []} - - # Second dossier call returns inactive - mock_inactive_dossier_response = Mock() - mock_inactive_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "
" - - mock_get.side_effect = [ - mock_substance_response, - mock_active_dossier_response, - mock_inactive_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert result["dossierType"] == "Inactive" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_no_dossiers_found(self, mock_get): - """Test when no dossiers (active or inactive) are found.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - # Both active and inactive return empty - mock_empty_response = Mock() - mock_empty_response.json.return_value = {"items": []} - - mock_get.side_effect = [ - mock_substance_response, - mock_empty_response, # Active - mock_empty_response # Inactive - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is False - - @patch('pif_compiler.services.echa_find.requests.get') - def test_last_update_date_parsed(self, mock_get): - """Test that lastUpdateDate is correctly parsed.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "
" - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert "lastUpdateDate" in result - assert result["lastUpdateDate"] == "2024-01-15" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_missing_last_update_date(self, mock_get): - """Test when lastUpdateDate is missing from response.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123" - # lastUpdatedDate missing - }] - } - - mock_index_response = Mock() - mock_index_response.text = "
" - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - # Should still work, just without lastUpdateDate - assert "lastUpdateDate" not in result - - -class TestSearchDossierHTMLParsing: - """Test HTML parsing for toxicology sections.""" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_all_tox_sections_found(self, mock_get): - """Test when all toxicology sections are found.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = """ - -
- -
-
- -
-
- -
- - """ - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert "ToxSummary" in result - assert "AcuteToxicity" in result - assert "RepeatedDose" in result - assert "tox_summary_001" in result["ToxSummary"] - assert "acute_tox_001" in result["AcuteToxicity"] - assert "repeated_dose_001" in result["RepeatedDose"] - - @patch('pif_compiler.services.echa_find.requests.get') - def test_only_tox_summary_found(self, mock_get): - """Test when only ToxSummary section exists.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = """ - -
- -
- - """ - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert "ToxSummary" in result - assert "AcuteToxicity" not in result - assert "RepeatedDose" not in result - - @patch('pif_compiler.services.echa_find.requests.get') - def test_no_tox_sections_found(self, mock_get): - """Test when no toxicology sections are found.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "No toxicology sections" - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert "ToxSummary" not in result - assert "AcuteToxicity" not in result - assert "RepeatedDose" not in result - # Basic info should still be present - assert "rmlId" in result - assert "index" in result - - @patch('pif_compiler.services.echa_find.requests.get') - def test_js_links_created(self, mock_get): - """Test that both HTML and JS links are created.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } 
- - mock_index_response = Mock() - mock_index_response.text = """ - -
- -
-
- -
- - """ - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert "ToxSummary" in result - assert "ToxSummary_js" in result - assert "AcuteToxicity" in result - assert "AcuteToxicity_js" in result - assert "index" in result - assert "index_js" in result - - -class TestSearchDossierURLConstruction: - """Test URL construction for various endpoints.""" - - @patch('pif_compiler.services.echa_find.requests.get') - def test_search_response_url(self, mock_get): - """Test that search_response URL is correctly constructed.""" - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": "Test Substance", - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } - }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "" - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier("50-00-0", input_type="rmlCas") - - assert result is not False - assert "search_response" in result - assert "50-00-0" in result["search_response"] - assert "https://chem.echa.europa.eu/api-substance/v1/substance" in result["search_response"] - - @patch('pif_compiler.services.echa_find.requests.get') - def test_url_encoding(self, mock_get): - """Test that special characters in substance names are URL-encoded.""" - substance_name = "test substance with spaces" - - mock_substance_response = Mock() - mock_substance_response.json.return_value = { - "items": [{ - "substanceIndex": { - "rmlId": "100.000.001", - "rmlName": substance_name, - "rmlCas": "50-00-0", - "rmlEc": "200-001-8" - } 
- }] - } - - mock_dossier_response = Mock() - mock_dossier_response.json.return_value = { - "items": [{ - "assetExternalId": "abc123", - "rootKey": "key123", - "lastUpdatedDate": "2024-01-15T10:30:00Z" - }] - } - - mock_index_response = Mock() - mock_index_response.text = "" - - mock_get.side_effect = [ - mock_substance_response, - mock_dossier_response, - mock_index_response - ] - - result = search_dossier(substance_name, input_type="rmlName") - - assert result is not False - assert "search_response" in result - # Spaces should be encoded - assert "%20" in result["search_response"] or "+" in result["search_response"] - - -class TestIntegration: - """Integration tests with real API (marked as integration).""" - - @pytest.mark.integration - def test_real_formaldehyde_search(self): - """Test real API call for formaldehyde (requires internet).""" - result = search_dossier("50-00-0", input_type="rmlCas") - - if result and isinstance(result, dict): - # Real API call succeeded - assert "rmlId" in result - assert "rmlName" in result - assert "rmlCas" in result - assert result["rmlCas"] == "50-00-0" - assert "index" in result - assert "dossierType" in result - - @pytest.mark.integration - def test_real_water_search(self): - """Test real API call for water by CAS (requires internet).""" - result = search_dossier("7732-18-5", input_type="rmlCas") - - if result and isinstance(result, dict): - assert "rmlCas" in result - assert result["rmlCas"] == "7732-18-5" - - @pytest.mark.integration - def test_real_nonexistent_substance(self): - """Test real API call for non-existent substance (requires internet).""" - result = search_dossier("999-99-9", input_type="rmlCas") - - # Should return False for non-existent substance - assert result is False or isinstance(result, str) - - @pytest.mark.integration - def test_real_glycerin_search(self): - """Test real API call for glycerin (requires internet).""" - result = search_dossier("56-81-5", input_type="rmlCas") - - if result and 
isinstance(result, dict): - assert "rmlCas" in result - assert result["rmlCas"] == "56-81-5" - assert "rmlId" in result - assert "dossierType" in result - - @pytest.mark.integration - def test_real_niacinamide_search(self): - """Test real API call for niacinamide (requires internet).""" - result = search_dossier("98-92-0", input_type="rmlCas") - - if result and isinstance(result, dict): - assert "rmlCas" in result - assert result["rmlCas"] == "98-92-0" - - @pytest.mark.integration - def test_real_retinol_search(self): - """Test real API call for retinol (requires internet).""" - result = search_dossier("68-26-8", input_type="rmlCas") - - if result and isinstance(result, dict): - assert "rmlCas" in result - assert result["rmlCas"] == "68-26-8" - - @pytest.mark.integration - def test_real_caffeine_search(self): - """Test real API call for caffeine (requires internet).""" - result = search_dossier("58-08-2", input_type="rmlCas") - - if result and isinstance(result, dict): - assert "rmlCas" in result - assert result["rmlCas"] == "58-08-2" - - @pytest.mark.integration - def test_real_salicylic_acid_search(self): - """Test real API call for salicylic acid (requires internet).""" - result = search_dossier("69-72-7", input_type="rmlCas") - - if result and isinstance(result, dict): - assert "rmlCas" in result - assert result["rmlCas"] == "69-72-7" - - @pytest.mark.integration - def test_real_titanium_dioxide_search(self): - """Test real API call for titanium dioxide (requires internet).""" - result = search_dossier("13463-67-7", input_type="rmlCas") - - if result and isinstance(result, dict): - assert "rmlCas" in result - assert result["rmlCas"] == "13463-67-7" - - @pytest.mark.integration - def test_real_zinc_oxide_search(self): - """Test real API call for zinc oxide (requires internet).""" - result = search_dossier("1314-13-2", input_type="rmlCas") - - if result and isinstance(result, dict): - assert "rmlCas" in result - assert result["rmlCas"] == "1314-13-2" - - 
@pytest.mark.integration - def test_multiple_cosmetic_ingredients(self, sample_cas_numbers): - """Test real API calls for multiple cosmetic ingredients (requires internet).""" - # Test a subset of common cosmetic ingredients - test_ingredients = [ - ("water", "7732-18-5"), - ("glycerin", "56-81-5"), - ("propylene_glycol", "57-55-6"), - ] - - for name, cas in test_ingredients: - result = search_dossier(cas, input_type="rmlCas") - if result and isinstance(result, dict): - assert result["rmlCas"] == cas - assert "rmlId" in result - # Give the API some time between requests - import time - time.sleep(0.5) diff --git a/utils/README.md b/utils/README.md deleted file mode 100644 index ca0c7c8..0000000 --- a/utils/README.md +++ /dev/null @@ -1,153 +0,0 @@ -# PIF Compiler - MongoDB Docker Setup - -## Quick Start - -Start MongoDB and Mongo Express web interface: - -```bash -cd utils -docker-compose up -d -``` - -Stop the services: - -```bash -docker-compose down -``` - -Stop and remove all data: - -```bash -docker-compose down -v -``` - -## Services - -### MongoDB -- **Port**: 27017 -- **Database**: toxinfo -- **Username**: admin -- **Password**: admin123 -- **Connection String**: `mongodb://admin:admin123@localhost:27017/toxinfo?authSource=admin` - -### Mongo Express (Web UI) -- **URL**: http://localhost:8082 -- **Username**: admin -- **Password**: admin123 - -## Usage in Python - -Update your MongoDB connection in `src/pif_compiler/functions/mongo_functions.py`: - -```python -# For local development with Docker -db = connect(user="admin", password="admin123", database="toxinfo") -``` - -Or use the connection URI directly: - -```python -from pymongo import MongoClient - -client = MongoClient("mongodb://admin:admin123@localhost:27017/toxinfo?authSource=admin") -db = client['toxinfo'] -``` - -## Data Persistence - -Data is persisted in Docker volumes: -- `mongodb_data` - Database files -- `mongodb_config` - Configuration files - -These volumes persist even when containers are 
stopped. - -## Creating Application User - -It's recommended to create a dedicated user for your application instead of using the admin account. - -### Option 1: Using mongosh (MongoDB Shell) - -```bash -# Access MongoDB shell -docker exec -it pif_mongodb mongosh -u admin -p admin123 --authenticationDatabase admin - -# In the MongoDB shell, run: -use toxinfo - -db.createUser({ - user: "pif_app", - pwd: "pif_app_password", - roles: [ - { role: "readWrite", db: "toxinfo" } - ] -}) - -# Exit the shell -exit -``` - -### Option 2: Using Mongo Express Web UI - -1. Go to http://localhost:8082 -2. Login with admin/admin123 -3. Select `toxinfo` database -4. Click on "Users" tab -5. Add new user with `readWrite` role - -### Option 3: Using Python Script - -Create a file `utils/create_user.py`: - -```python -from pymongo import MongoClient - -# Connect as admin -client = MongoClient("mongodb://admin:admin123@localhost:27017/?authSource=admin") -db = client['toxinfo'] - -# Create application user -db.command("createUser", "pif_app", - pwd="pif_app_password", - roles=[{"role": "readWrite", "db": "toxinfo"}]) - -print("User 'pif_app' created successfully!") -client.close() -``` - -Run it: -```bash -cd utils -uv run python create_user.py -``` - -### Update Your Application - -After creating the user, update your connection in `src/pif_compiler/functions/mongo_functions.py`: - -```python -# Use application user instead of admin -db = connect(user="pif_app", password="pif_app_password", database="toxinfo") -``` - -Or with connection URI: -```python -client = MongoClient("mongodb://pif_app:pif_app_password@localhost:27017/toxinfo?authSource=toxinfo") -``` - -### Available Roles - -- `read`: Read-only access to the database -- `readWrite`: Read and write access (recommended for your app) -- `dbAdmin`: Database administration (create indexes, etc.) -- `userAdmin`: Manage users and roles - -## Security Notes - -⚠️ **WARNING**: The default credentials are for local development only. 
- -For production: -1. Change all passwords in `docker-compose.yml` -2. Use environment variables or secrets management -3. Create dedicated users with minimal required permissions -4. Configure firewall rules -5. Enable SSL/TLS connections diff --git a/utils/changelog.py b/utils/changelog.py deleted file mode 100644 index 7a46591..0000000 --- a/utils/changelog.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python3 -""" -Change Log Manager -Manages a change.log file with external and internal changes -""" - -import os -from datetime import datetime -from enum import Enum - -class ChangeType(Enum): - EXTERNAL = "EXTERNAL" - INTERNAL = "INTERNAL" - -class ChangeLogManager: - def __init__(self, log_file="change.log"): - self.log_file = log_file - self._ensure_log_exists() - - def _ensure_log_exists(self): - """Create the log file if it doesn't exist""" - if not os.path.exists(self.log_file): - with open(self.log_file, 'w') as f: - f.write("# Change Log\n") - f.write(f"# Created: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n") - - def add_change(self, change_type, description): - """ - Add a change entry to the log - - Args: - change_type (ChangeType): Type of change (EXTERNAL or INTERNAL) - description (str): Description of the change - """ - timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S') - entry = f"[{timestamp}] [{change_type.value}] {description}\n" - - with open(self.log_file, 'a') as f: - f.write(entry) - - print(f"✓ Change added: {change_type.value} - {description}") - - def view_log(self, filter_type=None): - """ - View the change log with optional filtering - - Args: - filter_type (ChangeType, optional): Filter by change type - """ - if not os.path.exists(self.log_file): - print("No change log found.") - return - - with open(self.log_file, 'r') as f: - lines = f.readlines() - - print("\n" + "="*70) - print("CHANGE LOG") - print("="*70 + "\n") - - for line in lines: - if filter_type and f"[{filter_type.value}]" not in line: - continue - 
print(line, end='') - - print("\n" + "="*70 + "\n") - - def get_statistics(self): - """Display statistics about the change log""" - if not os.path.exists(self.log_file): - print("No change log found.") - return - - with open(self.log_file, 'r') as f: - lines = f.readlines() - - external_count = sum(1 for line in lines if "[EXTERNAL]" in line) - internal_count = sum(1 for line in lines if "[INTERNAL]" in line) - total = external_count + internal_count - - print("\n" + "="*40) - print("CHANGE LOG STATISTICS") - print("="*40) - print(f"Total changes: {total}") - print(f"External changes: {external_count}") - print(f"Internal changes: {internal_count}") - print("="*40 + "\n") - - -def main(): - manager = ChangeLogManager() - - while True: - print("\n📝 Change Log Manager") - print("1. Add External Change") - print("2. Add Internal Change") - print("3. View All Changes") - print("4. View External Changes Only") - print("5. View Internal Changes Only") - print("6. Show Statistics") - print("7. Exit") - - choice = input("\nSelect an option (1-7): ").strip() - - if choice == '1': - description = input("Enter external change description: ").strip() - if description: - manager.add_change(ChangeType.EXTERNAL, description) - else: - print("Description cannot be empty.") - - elif choice == '2': - description = input("Enter internal change description: ").strip() - if description: - manager.add_change(ChangeType.INTERNAL, description) - else: - print("Description cannot be empty.") - - elif choice == '3': - manager.view_log() - - elif choice == '4': - manager.view_log(filter_type=ChangeType.EXTERNAL) - - elif choice == '5': - manager.view_log(filter_type=ChangeType.INTERNAL) - - elif choice == '6': - manager.get_statistics() - - elif choice == '7': - print("Goodbye!") - break - - else: - print("Invalid option. 
Please select 1-7.") - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/utils/create_user.py b/utils/create_user.py deleted file mode 100644 index 51077fc..0000000 --- a/utils/create_user.py +++ /dev/null @@ -1,86 +0,0 @@ -""" -Create MongoDB application user for PIF Compiler. - -This script creates a dedicated user with readWrite permissions -on the toxinfo database instead of using the admin account. -""" - -from pymongo import MongoClient -from pymongo.errors import DuplicateKeyError, OperationFailure -import sys - - -def create_app_user(): - """Create application user for toxinfo database.""" - - # Configuration - ADMIN_USER = "admin" - ADMIN_PASSWORD = "admin123" - MONGO_HOST = "localhost" - MONGO_PORT = 27017 - - APP_USER = "pif_app" - APP_PASSWORD = "marox123" - APP_DATABASE = "pif-projects" - - print(f"Connecting to MongoDB as admin...") - - try: - # Connect as admin - client = MongoClient( - f"mongodb://{ADMIN_USER}:{ADMIN_PASSWORD}@{MONGO_HOST}:{MONGO_PORT}/?authSource=admin", - serverSelectionTimeoutMS=5000 - ) - - # Test connection - client.admin.command('ping') - print("✓ Connected to MongoDB successfully") - - # Switch to application database - db = client[APP_DATABASE] - - # Create application user - print(f"\nCreating user '{APP_USER}' with readWrite permissions on '{APP_DATABASE}'...") - - db.command( - "createUser", - APP_USER, - pwd=APP_PASSWORD, - roles=[{"role": "readWrite", "db": APP_DATABASE}] - ) - - print(f"✓ User '{APP_USER}' created successfully!") - print(f"\nConnection details:") - print(f" Username: {APP_USER}") - print(f" Password: {APP_PASSWORD}") - print(f" Database: {APP_DATABASE}") - print(f" Connection String: mongodb://{APP_USER}:{APP_PASSWORD}@{MONGO_HOST}:{MONGO_PORT}/{APP_DATABASE}?authSource={APP_DATABASE}") - - print(f"\nUpdate your mongo_functions.py with:") - print(f" db = connect(user='{APP_USER}', password='{APP_PASSWORD}', database='{APP_DATABASE}')") - - client.close() - return 0 - - 
except DuplicateKeyError: - print(f"⚠ User '{APP_USER}' already exists!") - print(f"\nTo delete and recreate, run:") - print(f" docker exec -it pif_mongodb mongosh -u admin -p admin123 --authenticationDatabase admin") - print(f" use {APP_DATABASE}") - print(f" db.dropUser('{APP_USER}')") - return 1 - - except OperationFailure as e: - print(f"✗ MongoDB operation failed: {e}") - return 1 - - except Exception as e: - print(f"✗ Error: {e}") - print("\nMake sure MongoDB is running:") - print(" cd utils") - print(" docker-compose up -d") - return 1 - - -if __name__ == "__main__": - sys.exit(create_app_user()) diff --git a/utils/docker-compose.yml b/utils/docker-compose.yml deleted file mode 100644 index 45d5882..0000000 --- a/utils/docker-compose.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: '3.8' - -services: - mongodb: - image: mongo:latest - container_name: personal_mongodb - restart: unless-stopped - environment: - MONGO_INITDB_ROOT_USERNAME: admin - MONGO_INITDB_ROOT_PASSWORD: bello98A. - MONGO_INITDB_DATABASE: toxinfo - ports: - - "27017:27017" - volumes: - - mongodb_data:/data/db - - mongodb_config:/data/configdb - networks: - - personal_network - -volumes: - mongodb_data: - driver: local - mongodb_config: - driver: local - -networks: - personal_network: - driver: bridge diff --git a/uv.lock b/uv.lock index 8f597aa..29902b6 100644 --- a/uv.lock +++ b/uv.lock @@ -966,6 +966,7 @@ dependencies = [ { name = "markdown-to-json" }, { name = "markdownify" }, { name = "playwright" }, + { name = "psycopg2" }, { name = "pubchemprops" }, { name = "pubchempy" }, { name = "pydantic" }, @@ -990,6 +991,7 @@ requires-dist = [ { name = "markdown-to-json", specifier = ">=2.1.2" }, { name = "markdownify", specifier = ">=1.2.0" }, { name = "playwright", specifier = ">=1.55.0" }, + { name = "psycopg2", specifier = ">=2.9.11" }, { name = "pubchemprops", specifier = ">=0.1.1" }, { name = "pubchempy", specifier = ">=1.0.5" }, { name = "pydantic", specifier = ">=2.11.10" }, @@ -1128,6 
+1130,17 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/26/65/1070a6e3c036f39142c2820c4b52e9243246fcfc3f96239ac84472ba361e/psutil-7.1.0-cp37-abi3-win_arm64.whl", hash = "sha256:6937cb68133e7c97b6cc9649a570c9a18ba0efebed46d8c5dae4c07fa1b67a07", size = 244971 }, ] +[[package]] +name = "psycopg2" +version = "2.9.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/89/8d/9d12bc8677c24dad342ec777529bce705b3e785fa05d85122b5502b9ab55/psycopg2-2.9.11.tar.gz", hash = "sha256:964d31caf728e217c697ff77ea69c2ba0865fa41ec20bb00f0977e62fdcc52e3", size = 379598 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/bf/635fbe5dd10ed200afbbfbe98f8602829252ca1cce81cc48fb25ed8dadc0/psycopg2-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:e03e4a6dbe87ff81540b434f2e5dc2bddad10296db5eea7bdc995bf5f4162938", size = 2713969 }, + { url = "https://files.pythonhosted.org/packages/88/5a/18c8cb13fc6908dc41a483d2c14d927a7a3f29883748747e8cb625da6587/psycopg2-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:8dc379166b5b7d5ea66dcebf433011dfc51a7bb8a5fc12367fa05668e5fc53c8", size = 2714048 }, + { url = "https://files.pythonhosted.org/packages/47/08/737aa39c78d705a7ce58248d00eeba0e9fc36be488f9b672b88736fbb1f7/psycopg2-2.9.11-cp314-cp314-win_amd64.whl", hash = "sha256:f10a48acba5fe6e312b891f290b4d2ca595fc9a06850fe53320beac353575578", size = 2803738 }, +] + [[package]] name = "pubchemprops" version = "0.1.1"